Example #1
def test_linfit_tied():
    p0, parinfo, fa = generate_toy_model()

    # This is a bad tie, but it's there to test that tying works
    parinfo[0]["tied"] = "2 * p[1]"

    m = mpfit(myfunctlin, p0, parinfo=parinfo, functkw=fa)
    res = m.optimize_result

    assert res.success
    assert res.dof == 9
    assert res.status == 1

    np.testing.assert_allclose(m.params[0] * 0.5, m.params[1], rtol=5e-7)
    np.testing.assert_allclose(m.params,
                               np.array([1.60881708, 0.80440854]),
                               rtol=5e-7)
    np.testing.assert_allclose(m.perror,
                               np.array([0.0, 0.00990717]),
                               rtol=5e-7)

    chisq = np.sum(
        myfunctlin(m.params, x=fa["x"], y=fa["y"], err=fa["err"])[1]**2)
    np.testing.assert_allclose(np.array([chisq]),
                               np.array([25465.436783]),
                               rtol=5e-7)
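
The helper names used above (generate_toy_model, myfunctlin) and the np/mpfit imports are defined elsewhere in the test module and are not part of this listing. A minimal sketch of what they plausibly look like, assuming the straight-line model p[0] - p[1] * x (the sign is inferred from the negative slope parameter expected by the tests below, fitted against data with a positive trend) and the usual mpfit convention of returning [status, weighted_residuals]:

import numpy as np

def myfunctlin(p, fjac=None, x=None, y=None, err=None):
    # mpfit user functions return [status, residuals]; a non-negative
    # status tells mpfit to keep iterating.
    model = p[0] - p[1] * x  # assumed model form, see note above
    status = 0
    return [status, (y - model) / err]

def generate_toy_model():
    # Presumably packages the same toy data as the explicit test_linfit
    # example below (same expected parameters and chi-square).
    x = np.array([-1.7237128, 1.8712276, -0.96608055, -0.28394297,
                  1.3416969, 1.3757038, -1.3703436, 0.042581975,
                  -0.14970151, 0.82065094])
    y = np.array([0.19000429, 6.5807428, 1.4582725, 2.7270851,
                  5.5969253, 5.6249280, 0.787615, 3.2599759,
                  2.9771762, 4.5936475])
    err = 0.07 * np.ones_like(y)
    p0 = np.array([1.0, 1.0])
    parinfo = [{"value": v, "fixed": 0, "limited": [0, 0],
                "limits": [0.0, 0.0]} for v in p0]
    fa = {"x": x, "y": y, "err": err}
    return p0, parinfo, fa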
Example #2
def test_linfit():
    x = N.array([-1.7237128E+00, 1.8712276E+00, -9.6608055E-01,
                 -2.8394297E-01, 1.3416969E+00, 1.3757038E+00,
                 -1.3703436E+00, 4.2581975E-02, -1.4970151E-01,
                 8.2065094E-01])
    y = N.array([1.9000429E-01, 6.5807428E+00, 1.4582725E+00,
                 2.7270851E+00, 5.5969253E+00, 5.6249280E+00,
                 0.787615, 3.2599759E+00, 2.9771762E+00,
                 4.5936475E+00])
    ey = 0.07 * N.ones(y.shape, dtype='float64')
    p0 = N.array([1.0, 1.0], dtype='float64')  # initial conditions
    pactual = N.array([3.2, 1.78])  # actual values used to make data
    parbase = {'value': 0., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.]}
    parinfo = []
    for i in range(len(pactual)):
        parinfo.append(copy.deepcopy(parbase))
    for i in range(len(pactual)):
        parinfo[i]['value'] = p0[i]
    fa = {'x': x, 'y': y, 'err': ey}
    m = mpfit(myfunctlin, p0, parinfo=parinfo, functkw=fa)
    if m.status <= 0:
        print('error message = ', m.errmsg)
    assert N.allclose(
        m.params, N.array([3.20996572, -1.7709542], dtype='float64'))
    assert N.allclose(
        m.perror, N.array([0.02221018, 0.01893756], dtype='float64'))
    chisq = (myfunctlin(m.params, x=x, y=y, err=ey)[1] ** 2).sum()

    assert N.allclose(
        N.array([chisq], dtype='float64'),
        N.array([2.756284983], dtype='float64'))
    assert m.dof == 8
    return
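
This standalone snippet (repeated as Example #4 below) also assumes module-level imports along these lines; the exact import path for mpfit varies between projects, so treat the last line as a placeholder:

import copy
import numpy as N  # this test module aliases numpy as N
from mpfit import mpfit  # placeholder; hyperspy, for example, ships it as hyperspy.external.mpfit.mpfit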
Example #3
def test_linfit_bounds():
    p0, parinfo, fa = generate_toy_model()

    # This is a bad bound, but it's there to test that bounds work
    parinfo[0]["limits"] = [1.5, 1.8]
    parinfo[0]["limited"] = [0, 1]

    m = mpfit(myfunctlin, p0, parinfo=parinfo, functkw=fa)
    res = m.optimize_result

    assert res.success
    assert res.dof == 8
    assert res.status == 1

    assert 1.5 <= m.params[0] <= 1.8
    np.testing.assert_allclose(m.params,
                               np.array([1.8, -1.86916384]),
                               rtol=5e-7)
    np.testing.assert_allclose(m.perror,
                               np.array([0.0, 0.01887426]),
                               rtol=5e-7)

    chisq = np.sum(
        myfunctlin(m.params, x=fa["x"], y=fa["y"], err=fa["err"])[1]**2)
    print(chisq)
    np.testing.assert_allclose(np.array([chisq]),
                               np.array([4032.830936495]),
                               rtol=5e-7)
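
For reference, mpfit's parinfo bound convention pairs "limits" (the bound values) with "limited" (per-side on/off flags), so the setting above enforces only the upper bound; the fit is free to fall below 1.5, and the lower-value check passes only because the result lands on the upper bound at 1.8. Schematically:

# [lower, upper] bound values ...
parinfo[0]["limits"] = [1.5, 1.8]
# ... and per-side flags: 0 = unconstrained, 1 = enforce that side
parinfo[0]["limited"] = [0, 1]   # only the upper bound is active here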
Example #4
def test_linfit():
    x = N.array([-1.7237128E+00, 1.8712276E+00, -9.6608055E-01,
                 -2.8394297E-01, 1.3416969E+00, 1.3757038E+00,
                 -1.3703436E+00, 4.2581975E-02, -1.4970151E-01,
                 8.2065094E-01])
    y = N.array([1.9000429E-01, 6.5807428E+00, 1.4582725E+00,
                 2.7270851E+00, 5.5969253E+00, 5.6249280E+00,
                 0.787615, 3.2599759E+00, 2.9771762E+00,
                 4.5936475E+00])
    ey = 0.07 * N.ones(y.shape, dtype='float64')
    p0 = N.array([1.0, 1.0], dtype='float64')  # initial conditions
    pactual = N.array([3.2, 1.78])  # actual values used to make data
    parbase = {'value': 0., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.]}
    parinfo = []
    for i in range(len(pactual)):
        parinfo.append(copy.deepcopy(parbase))
    for i in range(len(pactual)):
        parinfo[i]['value'] = p0[i]
    fa = {'x': x, 'y': y, 'err': ey}
    m = mpfit(myfunctlin, p0, parinfo=parinfo, functkw=fa)
    if m.status <= 0:
        print('error message = ', m.errmsg)
    assert N.allclose(
        m.params, N.array([3.20996572, -1.7709542], dtype='float64'))
    assert N.allclose(
        m.perror, N.array([0.02221018, 0.01893756], dtype='float64'))
    chisq = (myfunctlin(m.params, x=x, y=y, err=ey)[1] ** 2).sum()

    assert N.allclose(
        N.array([chisq], dtype='float64'),
        N.array([2.756284983], dtype='float64'))
    assert m.dof == 8
    return
Example #5
def test_rosenbrock():
    p0 = N.array([-1, 1.], dtype='float64')  # initial conditions
    pactual = N.array([1., 1.])  # actual minimum of the rosenbrock function
    m = mpfit(myfunctrosenbrock, p0)
    if m.status <= 0:
        print('error message = ', m.errmsg)
    assert m.status > 0
    assert N.allclose(m.params, pactual)
    assert N.allclose(m.fnorm, 0)
    return
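
myfunctrosenbrock is likewise defined elsewhere in the test module. A plausible sketch, assuming the Rosenbrock function is expressed as a residual vector for least squares (the duplicated, sign-flipped residuals are consistent with the parameter errors asserted in Example #7):

import numpy as np

def myfunctrosenbrock(p, fjac=None):
    # Residual vector whose sum of squares equals
    # 2 * ((1 - p[0])**2 + 100 * (p[1] - p[0]**2)**2), i.e. twice the
    # Rosenbrock function, so the minimum is at p = (1, 1) with fnorm = 0.
    res = np.array([1.0 - p[0], -(1.0 - p[0]),
                    10.0 * (p[1] - p[0]**2), -10.0 * (p[1] - p[0]**2)])
    status = 0
    return [status, res]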
Example #6
def test_rosenbrock():
    p0 = N.array([-1, 1.], dtype='float64')  # initial conditions
    pactual = N.array([1., 1.])  # actual minimum of the rosenbrock function
    m = mpfit(myfunctrosenbrock, p0)
    if m.status <= 0:
        print('error message = ', m.errmsg)
    assert m.status > 0
    assert N.allclose(m.params, pactual)
    assert N.allclose(m.fnorm, 0)
    return
Example #7
def test_rosenbrock():
    p0 = np.array([-1.0, 1.0])
    m = mpfit(myfunctrosenbrock, p0)
    res = m.optimize_result
    assert isinstance(res, OptimizeResult)
    assert res.success

    exp_param = np.array([1.0, 1.0])
    exp_fnorm = 0.0
    exp_error = np.array([0.70710678, 1.41598024])
    np.testing.assert_allclose(res.x, exp_param, rtol=5e-7)
    np.testing.assert_allclose(res.fnorm, exp_fnorm, rtol=5e-7)
    np.testing.assert_allclose(res.perror, exp_error, rtol=5e-7)
Example #8
def test_linfit():
    p0, parinfo, fa = generate_toy_model()
    m = mpfit(myfunctlin, p0, parinfo=parinfo, functkw=fa)
    res = m.optimize_result

    assert res.success
    assert res.dof == 8
    assert res.status == 1

    np.testing.assert_allclose(m.params,
                               np.array([3.20996572, -1.7709542]),
                               rtol=5e-7)
    np.testing.assert_allclose(m.perror,
                               np.array([0.02221018, 0.01893756]),
                               rtol=5e-7)

    chisq = np.sum(
        myfunctlin(m.params, x=fa["x"], y=fa["y"], err=fa["err"])[1]**2)
    np.testing.assert_allclose(np.array([chisq]),
                               np.array([2.756284983]),
                               rtol=5e-7)
Example #9
def test_rosenbrock_error():
    # Initial conditions
    p0 = np.array([-1.0, 1.0])
    m = mpfit(myfunctrosenbrock, p0, maxiter=1)
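    # mpfit exit status 5 means the maximum number of iterations
    # (here maxiter=1) was reached before convergence.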
    assert m.status == 5
    np.testing.assert_allclose(m.params, p0, rtol=5e-7)
Example #10
def boundedfit(
    model,
    old_p1,
    bounded=True,
    update_plot=False,
    return_info=True,
    **kwargs,
):
    '''
    Replica of bounded fit function from hyperspy/model.py, massively
    simplified, with multiple assumed variable values.

    Arguments:
    model -- The model to be fitted. It must be passed in explicitly
             because fit(), in this case, is not a member function of
             the Model class as it is in model.py.
    old_p1 -- Inherited parameter set.
    bounded -- Flag indicating whether the bounded fit should be used.
               Not strictly necessary for this function, but a remnant of
               the hyperspy implementation, where the bounded fit is an
               option inside fit() in hyperspy/model.py.
    update_plot -- Flag indicating whether to update any interactive plot
                   that may be open.
    return_info -- Flag indicating whether to return the results of the
                   fit.
    **kwargs -- Keyword arguments to pass to mpfit() from
                hyperspy.external.mpfit.mpfit.

    Output:
    model.fit_output -- If return_info is True, the output of the fit
                        function, mpfit(), is returned.
    '''

    # Suspend plot updates during the fit when an interactive plot is active
    # but update_plot is False; otherwise use a no-op context manager
    cm = (model.suspend_update if (update_plot != model._plot_active)
          and not update_plot else dummy_context_manager)

    # Bind existing parameters inside their prescribed limits
    if bounded:
        model.ensure_parameters_in_bounds()

    with cm(update_on_resume=True):
        model.p_std = None
        model.p0 = old_p1  # Set existing p0 to inherited p0
        old_p0 = model.p0  # For checking fit function success later

        weights = model._convert_variance_to_weights()

        args = (model.signal()[np.where(model.channel_switches)], weights)

        model._set_mpfit_parameters_info(bounded=bounded)

        auto_deriv = 1

        # Actual optimization function using hyperspy.external.mpfit.mpfit
        # The model._errfunc4mpfit calls a function which is defined in
        # ScalableReferencePattern.function() in pyxem/components/scalable_reference_pattern.py
        # model.mpfit_parinfo contains information about the limits set for the parameters
        res = mpfit(
            model._errfunc4mpfit,
            model.p0[:],
            parinfo=model.mpfit_parinfo,
            functkw={
                "y": model.signal()[model.channel_switches],
                "weights": weights,
            },
            autoderivative=auto_deriv,
            quiet=1,
            **kwargs,
        )

        # Create an object, OptimizeResult, to store optimization results
        model.fit_output = OptimizeResult(
            x=res.params,
            covar=res.covar,
            perror=res.perror,
            nit=res.niter,
            nfev=res.nfev,
            success=(res.status > 0) and (res.status != 5),
            status=res.status,
            message=res.errmsg,
            debug=res.debug,
            dof=res.dof,
            fnorm=res.fnorm,
        )

        model.p0 = model.fit_output.x  # Update parameter set in model
        ysize = len(model.fit_output.x) + model.fit_output.dof
        cost = model.fit_output.fnorm
        pcov = model.fit_output.perror**2  # Squared 1-sigma parameter errors (variances)

        # Standard deviation of parameter fit results
        model.p_std = model._calculate_parameter_std(pcov, cost, ysize)

        # If p0 is a scalar (single parameter), wrap it in a one-element
        # tuple so downstream code can always iterate over it
        if not np.iterable(model.p0):
            model.p0 = (model.p0, )

        # Store parameter values in a map corresponding to the image data used
        # to create the model
        model._fetch_values_from_p0(p_std=model.p_std)
        model.store_current_values()
        model._calculate_chisq()
        model._set_current_degrees_of_freedom()

    # Check that the parameters have been changed by the fitting
    if np.any(old_p0 != model.p0):
        model.events.fitted.trigger(model)

    # Success check and error message output
    success = model.fit_output.get("success", None)
    if success is False:
        message = model.fit_output.get("message", "Unknown reason")
        _logger.warning(
            f"`m.fit()` did not exit successfully. Reason: {message}")

    if return_info:
        return model.fit_output
    else:
        return None
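
A hypothetical call site, assuming model is a hyperspy-style Model object that exposes the attributes used above (p0, signal, channel_switches, _errfunc4mpfit, ...) and that a parameter vector from an earlier fit is being carried over:

# Hypothetical usage; `model` is assumed to be a hyperspy-style Model.
previous_params = model.p0[:]  # parameter set inherited from an earlier fit
result = boundedfit(model, previous_params, bounded=True, return_info=True)
if result is not None and result.success:
    print("fitted parameters:", result.x)
    print("1-sigma errors:  ", result.perror)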