Example #1
    def test_xerr(self):
        c = Parameter(1.0, name='c')
        m = Parameter(2.0, name='m')
        p = c | m

        fit_model = Model(p, fitfunc=line3)
        assert_(fit_model._fitfunc_has_xerr is True)

        fit_model = Model(p, fitfunc=line2)
        assert_(fit_model._fitfunc_has_xerr is False)
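
The fitfuncs (line, line2, line3) used by these tests are not shown in the excerpts. A minimal sketch of what they might look like, assuming Model sets _fitfunc_has_xerr when the fitfunc accepts an x_err keyword, and that np.array(params) yields the parameter values:

import numpy as np

def line(x, params, *args, **kwds):
    # straight line; parameters read positionally (c, then m)
    c, m = np.array(params)
    return c + m * x

# parameters retrieved by name, so their order in the Parameters set
# doesn't matter; kept as a lambda, as Example #3 notes
line2 = lambda x, params: params["c"].value + params["m"].value * x

def line3(x, params, *args, x_err=None, **kwds):
    # an x_err keyword argument, which Model is assumed to detect
    c, m = np.array(params)
    return c + m * x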
Example #2
    def test_xerr(self):
        c = Parameter(1.0, name="c")
        m = Parameter(2.0, name="m")
        p = c | m

        fit_model = Model(p, fitfunc=line3)
        assert fit_model._fitfunc_has_xerr is True

        fit_model = Model(p, fitfunc=line2)
        assert fit_model._fitfunc_has_xerr is False
Example #3
    def test_evaluation(self):
        c = Parameter(1.0, name="c")
        m = Parameter(2.0, name="m")
        p = c | m

        fit_model = Model(p, fitfunc=line)
        x = np.linspace(0, 100.0, 20)
        y = 2.0 * x + 1.0

        # different ways of getting the model instance to evaluate
        assert_equal(fit_model.model(x, p), y)
        assert_equal(fit_model(x, p), y)
        assert_equal(fit_model.model(x), y)
        assert_equal(fit_model(x), y)

        # check that the model object can be pickled and restored
        pkl = pickle.dumps(fit_model)
        unpkl = pickle.loads(pkl)
        assert_equal(unpkl(x), y)

        # you should be able to use a lambda
        fit_model = Model(p, fitfunc=line2)
        assert_equal(fit_model(x, p), y)

        # and swap the order of the parameters; retrieval is by key, so the
        # order doesn't matter
        p = m | c
        fit_model = Model(p, fitfunc=line2)
        assert_equal(fit_model(x, p), y)
Example #4
    def setup_method(self):
        # Reproducible results!
        np.random.seed(123)

        m_true = -0.9594
        b_true = 4.294
        f_true = 0.534
        m_ls = -1.1040757010910947
        b_ls = 5.4405552502319505

        # Generate some synthetic data from the model.
        N = 50
        x = np.sort(10 * np.random.rand(N))
        y_err = 0.1 + 0.5 * np.random.rand(N)
        y = m_true * x + b_true
        y += np.abs(f_true * y) * np.random.randn(N)
        y += y_err * np.random.randn(N)

        data = Data1D(data=(x, y, y_err))

        p = Parameter(b_ls, 'b', vary=True, bounds=(-100, 100))
        p |= Parameter(m_ls, 'm', vary=True, bounds=(-100, 100))

        model = Model(p, fitfunc=line)
        self.objective = Objective(model, data)
        self.mcfitter = CurveFitter(self.objective)
        self.mcfitter_t = CurveFitter(self.objective, ntemps=20)

        self.mcfitter.initialise('prior')
        self.mcfitter_t.initialise('prior')
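
A test using this setup would then draw samples from both fitters; a hypothetical test method, assuming CurveFitter exposes a sample(steps) method:

    def test_sampling(self):
        # draw a few MCMC steps with the plain and the parallel-tempered
        # sampler (assumed API: CurveFitter.sample(steps))
        self.mcfitter.sample(50)
        self.mcfitter_t.sample(50)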
Example #5
def NIST_runner(
    dataset,
    method="least_squares",
    chi_atol=1e-5,
    val_rtol=1e-2,
    err_rtol=6e-3,
):
    NIST_dataset = ReadNistData(dataset)
    x, y = (NIST_dataset["x"], NIST_dataset["y"])

    if dataset == "Nelson":
        y = np.log(y)

    params = NIST_dataset["start"]

    fitfunc = NIST_Models[dataset][0]
    model = Model(params, fitfunc)
    objective = Objective(model, (x, y))
    fitter = CurveFitter(objective)
    result = fitter.fit(method=method)

    assert_allclose(objective.chisqr(),
                    NIST_dataset["sum_squares"],
                    atol=chi_atol)

    certval = NIST_dataset["cert_values"]
    assert_allclose(result.x, certval, rtol=val_rtol)

    if "stderr" in result:
        certerr = NIST_dataset["cert_stderr"]
        assert_allclose(result.stderr, certerr, rtol=err_rtol)
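
NIST_runner would presumably be driven once per dataset; a hypothetical invocation, parametrised over a few names from the NIST StRD suite (the test name and dataset selection here are illustrative):

import pytest

@pytest.mark.parametrize("dataset", ["Gauss1", "Lanczos1", "Nelson"])
def test_NIST_suite(dataset):
    NIST_runner(dataset)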
Example #6
    def test_ND_model(self):
        # Check that ND data can be passed to/from a model.
        # Here we check that x can be multidimensional and that the fitfunc
        # can return multidimensional y.
        # It is up to the user to ensure that the Data/Model/Objective stack
        # is mutually consistent, e.g. the Model output should have the same
        # shape as Data.y.
        rng = np.random.default_rng()
        x = rng.uniform(size=100).reshape(2, 50)

        c = Parameter(1.0, name="c")
        m = Parameter(2.0, name="m")
        p = c | m

        # check that the function is returning what it's supposed to before
        # we test Model
        y0 = line(x[0], p)
        y1 = line(x[1], p)
        desired = np.vstack((y0, y1))
        assert_allclose(line_ND(x, p), desired)

        fit_model = Model(p, fitfunc=line_ND)
        y = fit_model(x)
        assert_allclose(y, desired)
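
line_ND is likewise not shown. Since the vstack of row-wise line evaluations above equals a plain elementwise evaluation, a minimal sketch that satisfies this test (and Example #10) would be:

def line_ND(x, params, *args, **kwds):
    # elementwise straight line: x may have any shape, and y comes back
    # with the same shape (assumed definition)
    c, m = np.array(params)
    return c + m * np.asarray(x)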
Example #7
def NIST_runner(dataset, method='least_squares', chi_atol=1e-5,
                val_rtol=1e-2, err_rtol=5e-3):
    NIST_dataset = ReadNistData(dataset)
    x, y = (NIST_dataset['x'], NIST_dataset['y'])

    if dataset == 'Nelson':
        y = np.log(y)

    params = NIST_dataset['start']

    fitfunc = NIST_Models[dataset][0]
    model = Model(params, fitfunc)
    objective = Objective(model, (x, y))
    fitter = CurveFitter(objective)
    result = fitter.fit(method=method)

    assert_allclose(objective.chisqr(),
                    NIST_dataset['sum_squares'],
                    atol=chi_atol)

    certval = NIST_dataset['cert_values']
    assert_allclose(result.x, certval, rtol=val_rtol)

    if 'stderr' in result:
        certerr = NIST_dataset['cert_stderr']
        assert_allclose(result.stderr, certerr, rtol=err_rtol)
Example #8
    # tmpdir is a pytest fixture, so this setup must itself run as an
    # autouse fixture for pytest to inject it
    @pytest.fixture(autouse=True)
    def setup_method(self, tmpdir):
        self.path = os.path.dirname(os.path.abspath(__file__))
        self.tmpdir = tmpdir.strpath

        theoretical = np.loadtxt(os.path.join(self.path, "gauss_data.txt"))
        xvals, yvals, evals = np.hsplit(theoretical, 3)
        xvals = xvals.flatten()
        yvals = yvals.flatten()
        evals = evals.flatten()

        # these best weighted values and uncertainties were obtained with Igor
        self.best_weighted = [-0.00246095, 19.5299, -8.28446e-2, 1.24692]

        self.best_weighted_errors = [
            0.0220313708486,
            1.12879436221,
            0.0447659158681,
            0.0412022938883,
        ]

        self.best_weighted_chisqr = 77.6040960351

        self.best_unweighted = [
            -0.10584111872702096,
            19.240347049328989,
            0.0092623066070940396,
            1.501362314145845,
        ]

        self.best_unweighted_errors = [
            0.34246565477,
            0.689820935208,
            0.0411243173041,
            0.0693429375282,
        ]

        self.best_unweighted_chisqr = 497.102084956

        self.p0 = np.array([0.1, 20.0, 0.1, 0.1])
        self.names = ["bkg", "A", "x0", "width"]
        self.bounds = [(-1, 1), (0, 30), (-5.0, 5.0), (0.001, 2)]

        self.params = Parameters(name="gauss_params")
        for p, name, bound in zip(self.p0, self.names, self.bounds):
            param = Parameter(p, name=name)
            param.range(*bound)
            param.vary = True
            self.params.append(param)

        self.model = Model(self.params, fitfunc=gauss)
        self.data = Data1D((xvals, yvals, evals))
        self.objective = Objective(self.model, self.data)
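
The gauss fitfunc is also not shown; a plausible sketch, assuming a peak of amplitude A centred at x0 on a constant background bkg (the exact functional form is an assumption):

def gauss(x, params, *args, **kwds):
    # gaussian peak on a constant background; parameter order matches
    # self.names above: bkg, A, x0, width
    bkg, A, x0, width = np.array(params)
    return bkg + A * np.exp(-0.5 * ((x - x0) / width) ** 2)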
Example #9
    def setup_method(self):
        # Choose the "true" parameters.

        # Reproducible results!
        np.random.seed(123)

        self.m_true = -0.9594
        self.b_true = 4.294
        self.f_true = 0.534
        self.m_ls = -1.1040757010910947
        self.b_ls = 5.4405552502319505

        # Generate some synthetic data from the model.
        N = 50
        x = np.sort(10 * np.random.rand(N))
        y_err = 0.1 + 0.5 * np.random.rand(N)
        y = self.m_true * x + self.b_true
        y += np.abs(self.f_true * y) * np.random.randn(N)
        y += y_err * np.random.randn(N)

        self.data = Data1D(data=(x, y, y_err))

        self.p = Parameter(self.b_ls, 'b') | Parameter(self.m_ls, 'm')
        self.model = Model(self.p, fitfunc=line)
        self.objective = Objective(self.model, self.data)

        # vary both b and m
        self.p[0].vary = True
        self.p[1].vary = True

        mod = np.array([4.78166609, 4.42364699, 4.16404064, 3.50343504,
                        3.4257084, 2.93594347, 2.92035638, 2.67533842,
                        2.28136038, 2.19772983, 1.99295496, 1.93748334,
                        1.87484436, 1.65161016, 1.44613461, 1.11128101,
                        1.04584535, 0.86055984, 0.76913963, 0.73906649,
                        0.73331407, 0.68350418, 0.65216599, 0.59838566,
                        0.13070299, 0.10749131, -0.01010195, -0.10010155,
                        -0.29495372, -0.42817431, -0.43122391, -0.64637715,
                        -1.30560686, -1.32626428, -1.44835768, -1.52589881,
                        -1.56371158, -2.12048349, -2.24899179, -2.50292682,
                        -2.53576659, -2.55797996, -2.60870542, -2.7074727,
                        -3.93781479, -4.12415366, -4.42313742, -4.98368609,
                        -5.38782395, -5.44077086])
        self.mod = mod
Example #10
    def test_multidimensionality(self):
        # Check that ND data can be used with the Objective/Model/Data stack
        # (or at least that it doesn't get in the way)
        rng = np.random.default_rng()
        x = rng.uniform(size=100).reshape(50, 2)

        desired = line_ND(x, self.p)
        assert desired.shape == (50, 2)
        data = Data1D((x, desired))

        model = Model(self.p, fitfunc=line_ND)
        y = model(x)
        assert_allclose(y, desired)

        objective = Objective(model, data)
        assert_allclose(objective.chisqr(), 0)
        assert_allclose(objective.generative(), desired)
        assert_allclose(objective.residuals(), 0)
        assert objective.residuals().shape == (50, 2)

        objective.logl()
        objective.logpost()
        covar = objective.covar()
        assert covar.shape == (2, 2)
Example #11
    def test_covar(self):
        # check objective.covar against the covariance estimated by
        # scipy.optimize.least_squares.
        path = os.path.dirname(os.path.abspath(__file__))

        theoretical = np.loadtxt(os.path.join(path, 'gauss_data.txt'))
        xvals, yvals, evals = np.hsplit(theoretical, 3)
        xvals = xvals.flatten()
        yvals = yvals.flatten()
        evals = evals.flatten()

        p0 = np.array([0.1, 20., 0.1, 0.1])
        names = ['bkg', 'A', 'x0', 'width']
        bounds = [(-1, 1), (0, 30), (-5., 5.), (0.001, 2)]

        params = Parameters(name="gauss_params")
        for p, name, bound in zip(p0, names, bounds):
            param = Parameter(p, name=name)
            param.range(*bound)
            param.vary = True
            params.append(param)

        model = Model(params, fitfunc=gauss)
        data = Data1D((xvals, yvals, evals))
        objective = Objective(model, data)

        # first calculate least_squares jac/hess/covariance matrices
        res = least_squares(objective.residuals,
                            np.array(params),
                            jac='3-point')

        hess_least_squares = np.matmul(res.jac.T, res.jac)
        covar_least_squares = np.linalg.inv(hess_least_squares)

        # now calculate the corresponding matrices by hand, to check that
        # this approach agrees with least_squares
        objective.setp(res.x)
        _pvals = np.array(res.x)

        def residuals_scaler(vals):
            return np.squeeze(objective.residuals(_pvals * vals))

        jac = approx_derivative(residuals_scaler, np.ones_like(_pvals))
        hess = np.matmul(jac.T, jac)
        covar = np.linalg.inv(hess)

        covar = covar * np.atleast_2d(_pvals) * np.atleast_2d(_pvals).T

        assert_allclose(covar, covar_least_squares)

        # check that objective.covar corresponds to the least_squares
        # covariance matrix
        objective.setp(res.x)
        _pvals = np.array(res.x)
        covar_objective = objective.covar()
        assert_allclose(covar_objective, covar_least_squares)

        # now see what happens with a parameter that has no effect on residuals
        param = Parameter(1.234, name='dummy')
        param.vary = True
        params.append(param)

        from pytest import raises
        with raises(LinAlgError):
            objective.covar()
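
The LinAlgError at the end is expected: a varying parameter with no influence on the residuals contributes a zero column to the Jacobian, so J.T @ J is singular and cannot be inverted. A minimal numpy illustration:

import numpy as np

J = np.array([[1.0, 0.0],
              [2.0, 0.0]])  # second column: parameter with no effect
hess = J.T @ J              # singular Hessian approximation
np.linalg.inv(hess)         # raises numpy.linalg.LinAlgError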