Example #1
def test_complex_model_uncertainty():
    # ======================================================================
    "Check the fit of a real-valued model to complex-valued data"

    x = np.linspace(0, 7, 100)

    def model(p):
        phase, center, width = p
        y = dd_gauss(x, center, width)
        y = y * np.exp(-1j * phase)
        return y

    y = model([np.pi / 5, 3, 0.5])

    y = y + whitegaussnoise(x, 0.01)
    y = y + 1j * whitegaussnoise(x, 0.05)

    fitResult = snlls(y,
                      model,
                      par0=[2 * np.pi / 5, 4, 0.2],
                      lb=[-np.pi, 1, 0.05],
                      ub=[np.pi, 6, 5])
    ciwidth = np.sum(
        fitResult.modelUncert.ci(95)[:, 1] -
        fitResult.modelUncert.ci(95)[:, 0])

    assert (ciwidth.real < ciwidth.imag).all()
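For clarity, the assertion can be restated with named lower/upper bounds; this is only a re-expression of the check above, using the same column convention (column 0 holds the lower bounds, column 1 the upper bounds of the 95% interval). Since the imaginary channel carries five times more noise (0.05 vs. 0.01), its confidence band is expected to be systematically wider.

ci95 = fitResult.modelUncert.ci(95)
lower, upper = ci95[:, 0], ci95[:, 1]
width_real = np.sum((upper - lower).real)
width_imag = np.sum((upper - lower).imag)
assert width_real < width_imag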
Example #2
def test_multiple_datasets():
    # ======================================================================
    "Check bootstrapping when using multiple input datasets"

    t1 = np.linspace(0, 5, 200)
    t2 = np.linspace(-0.5, 3, 300)
    r = np.linspace(2, 6, 300)
    P = dd_gauss(r, 4, 0.8)
    K1 = dipolarkernel(t1, r)
    K2 = dipolarkernel(t2, r)

    Vexp1 = K1 @ P + whitegaussnoise(t1, 0.01, seed=1)
    Vexp2 = K2 @ P + whitegaussnoise(t2, 0.02, seed=2)

    def Vmodel(par):
        V1 = K1 @ dd_gauss(r, *par)
        V2 = K2 @ dd_gauss(r, *par)
        return [V1, V2]

    par0 = [3, 0.5]
    fit = snlls([Vexp1, Vexp2], Vmodel, par0)
    Vfit1, Vfit2 = fit.model

    def bootfcn(V):
        fit = snlls(V, Vmodel, par0)
        return fit.nonlin

    paruq = bootstrap_analysis(bootfcn, [Vexp1, Vexp2], [Vfit1, Vfit2], 5)

    assert all(abs(paruq.mean - fit.nonlin) < 1.5e-2)
Example #3
def test_noseed():
    # ======================================================================
    "Check that the seed keyword is working if set to None"

    N = 151
    t = np.linspace(0, 3, N)
    sig = 0.2
    noise1 = whitegaussnoise(t, sig, seed=None)
    noise2 = whitegaussnoise(t, sig, seed=None)

    assert not np.array_equal(noise1, noise2)
Example #4
def test_seed():
    # ======================================================================
    "Check that the seed keyword is working"

    N = 151
    t = np.linspace(0, 3, N)
    sig = 0.2
    s = 432
    noise1 = whitegaussnoise(t, sig, seed=s)
    noise2 = whitegaussnoise(t, sig, seed=s)

    assert np.array_equal(noise1, noise2)
Example #5
def generate_global_dataset():

    t1 = np.linspace(-0.5, 3, 200)
    t2 = np.linspace(-0.5, 4, 200)
    r = np.linspace(2.5, 5, 80)
    P = dd_gauss2(r, 3.7, 4.3, 0.2, 0.1, 0.5, 0.5)
    P /= np.trapz(P, r)
    K1 = dipolarkernel(t1, r)
    K2 = dipolarkernel(t2, r)
    np.random.seed(1)
    V1 = K1 @ P + whitegaussnoise(t1, 0.01, seed=1, rescale=True)
    np.random.seed(2)
    V2 = K2 @ P + whitegaussnoise(t2, 0.01, seed=1, rescale=True)

    return r, P, V1, V2, K1, K2
Example #6
def test_global_weights_default():
    # ======================================================================
    "Check the correct fit of two signals when one is of very low quality"

    t = np.linspace(0, 5, 300)
    r = np.linspace(2, 6, 90)
    P = dd_gauss(r, 4.5, 0.25)

    K = dipolarkernel(t, r)
    scales = [1e3, 1e3]
    V1 = scales[0] * K @ P + whitegaussnoise(t, 0.001, seed=1)
    V2 = scales[1] * K @ P + whitegaussnoise(t, 0.1, seed=1)

    fit = snlls([V1, V2], [K, K], lbl=np.zeros_like(r))

    assert ovl(P, fit.param) > 0.95
Example #7
def test_extrapenalty():
    # ======================================================================
    "Check that custom penalties can be passed and act on the solution"

    t = np.linspace(0, 3, 300)
    r = np.linspace(2, 5, 200)
    P = dd_gauss2(r, 3.5, 0.5, 0.5, 4, 0.1, 0.5)
    K = dipolarkernel(t, r)
    V = K @ P + whitegaussnoise(t, 0.15, seed=1)

    par0 = [2.5, 0.01, 0.1, 4.5, 0.01, 0.6]
    lb = [1, 0.01, 0, 1, 0.01, 0]
    ub = [20, 1, 1, 20, 1, 1]
    # Fit without the penalty: the fit gets stuck at overly narrow ("spiky") Gaussians
    model = lambda p: K @ dd_gauss2(r, *p)
    fit = snlls(V, model, par0, lb, ub)

    # Fit with Tikhonov penalty on the Gaussians model
    L = regoperator(r, 2)
    alpha = 1e-4
    tikhonov = lambda p, _: alpha * L @ dd_gauss2(r, *p)
    fit_tikh = snlls(V, model, par0, lb, ub, extrapenalty=tikhonov)

    Pfit = dd_gauss2(r, *fit.nonlin)
    Pfit_tikh = dd_gauss2(r, *fit_tikh.nonlin)

    assert ovl(P, Pfit) < ovl(P, Pfit_tikh)
Example #8
def test_algorithms():
    #=======================================================================
    "Check that the value returned by the the grid and Brent algorithms coincide"

    t = np.linspace(0, 5, 80)
    r = np.linspace(2, 5, 80)
    P = dd_gauss(r, 3, 0.2)
    K = dipolarkernel(t, r)
    L = regoperator(r, 2)
    V = K @ P + whitegaussnoise(t, 0.02, seed=1)

    alpha_grid = selregparam(V,
                             K,
                             cvxnnls,
                             method='aic',
                             algorithm='grid',
                             regop=L)
    alpha_brent = selregparam(V,
                              K,
                              cvxnnls,
                              method='aic',
                              algorithm='brent',
                              regop=L)

    assert abs(1 - alpha_grid / alpha_brent) < 0.15
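The tolerance on the last line is a 15% relative deviation between the two selected regularization parameters; it can be written equivalently with numpy's relative-tolerance helper (atol=0 so only the relative term counts):

assert np.isclose(alpha_grid, alpha_brent, rtol=0.15, atol=0)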
Example #9
def test_confinter_model():
    #=======================================================================
    "Check that the confidence intervals of the fitted model are correct"

    # Prepare test data
    r = np.linspace(1, 8, 150)
    t = np.linspace(0, 4, 200)
    lam = 0.25
    K = dipolarkernel(t, r, mod=lam)
    parin = [3.5, 0.4, 0.6, 4.5, 0.5, 0.4]
    P = dd_gauss2(r, *parin)
    V = K @ P + whitegaussnoise(t, 0.05, seed=1)

    nlpar0 = 0.2
    lb = 0
    ub = 1
    lbl = np.full(len(r), 0)
    # Separable LSQ fit
    fit = snlls(V, lambda lam: dipolarkernel(t, r, mod=lam), nlpar0, lb, ub,
                lbl)
    Vfit = fit.model
    Vuq = fit.modelUncert
    Vci50 = Vuq.ci(50)
    Vci95 = Vuq.ci(95)

    Vlb = np.full(len(t), -np.inf)
    Vub = np.full(len(t), np.inf)

    assert_confidence_intervals(Vci50, Vci95, Vfit, Vlb, Vub)
Example #10
def test_goodness_of_fit():
    #============================================================
    "Check the goodness-of-fit statistics are correct"

    # Prepare test data
    r = np.linspace(2, 5, 150)
    t = np.linspace(-0.2, 4, 100)
    lam = 0.25
    K = dipolarkernel(t, r, mod=lam)
    parin = [3.5, 0.15, 0.6, 4.5, 0.2, 0.4]
    P = dd_gauss2(r, *parin)
    sigma = 0.03
    V = K @ P + whitegaussnoise(t, sigma, seed=2, rescale=True)

    # Non-linear parameters
    # nlpar = [lam]
    nlpar0 = 0.2
    lb = 0
    ub = 1
    # Linear parameters: non-negativity
    lbl = np.zeros(len(r))
    ubl = []
    # Separable LSQ fit
    fit = snlls(V,
                lambda lam: dipolarkernel(t, r, mod=lam),
                nlpar0,
                lb,
                ub,
                lbl,
                ubl,
                noiselvl=sigma,
                uq=False)
    stats = fit.stats

    assert abs(stats['chi2red'] - 1) < 0.05
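For reference, a minimal sketch of the usual reduced chi-squared definition that the assertion relies on; the exact number of fitted parameters counted internally by snlls may differ, so this is an illustration rather than the library's implementation.

def reduced_chi_squared(V, Vfit, sigma, n_params):
    # Residuals normalized by the known noise level, divided by the
    # degrees of freedom (number of points minus fitted parameters)
    residuals = (V - Vfit) / sigma
    return np.sum(residuals**2) / (len(V) - n_params)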
Example #11
def test_multiple_penalties():
    # ======================================================================
    "Check that multiple additional penaltyies can be passed correctly"

    t = np.linspace(0, 5, 300)
    r = np.linspace(2, 6, 90)
    P = dd_gauss(r, 4.5, 0.25)
    param = 0.2
    K = dipolarkernel(t, r, mod=param)
    V = K @ P + whitegaussnoise(t, 0.001, seed=1)
    dr = np.mean(np.diff(r))
    beta = 0.05
    R = 0.5
    compactness_penalty = lambda pnonlin, plin: beta * np.sqrt(plin * (
        r - np.trapz(plin * r, r))**2 * dr)
    radial_penalty = lambda pnonlin, plin: 1 / R**2 * (np.linalg.norm(
        (pnonlin - param) / param - R))**2

    Kmodel = lambda lam: dipolarkernel(t, r, mod=lam)
    fit0 = snlls(V,
                 Kmodel,
                 par0=0.2,
                 lb=0,
                 ub=1,
                 lbl=np.zeros_like(r),
                 extrapenalty=[compactness_penalty])
    fitmoved = snlls(V,
                     Kmodel,
                     par0=0.2,
                     lb=0,
                     ub=1,
                     lbl=np.zeros_like(r),
                     extrapenalty=[compactness_penalty, radial_penalty])

    assert ovl(P, fit0.lin) > ovl(P, fitmoved.lin)
Example #12
def test_confinter_linear():
    #=======================================================================
    "Check that the confidence intervals of the linear parameters are correct"

    # Prepare test data
    r = np.linspace(1, 8, 150)
    t = np.linspace(0, 4, 200)
    lam = 0.25
    K = dipolarkernel(t, r, mod=lam)
    parin = [3.5, 0.4, 0.6, 4.5, 0.5, 0.4]
    P = dd_gauss2(r, *parin)
    V = K @ P + whitegaussnoise(t, 0.05, seed=1)

    # Non-linear parameters
    # nlpar = [lam]
    nlpar0 = 0.2
    lb = 0
    ub = 1
    # Linear parameters: non-negativity
    lbl = np.zeros(len(r))
    ubl = np.full(len(r), np.inf)
    # Separable LSQ fit
    fit = snlls(V, lambda lam: dipolarkernel(t, r, mod=lam), nlpar0, lb, ub,
                lbl)
    Pfit = np.round(fit.lin, 6)
    uq = fit.linUncert
    Pci50 = np.round(uq.ci(50), 6)
    Pci95 = np.round(uq.ci(95), 6)

    assert_confidence_intervals(Pci50, Pci95, Pfit, lbl, ubl)
Example #13
def test_length():
    # ======================================================================
    "Check that the array length is correct"

    N = 151
    t = np.linspace(0, 3, N)
    noise = whitegaussnoise(t, 0.2)

    assert len(noise) == N
Example #14
def test_global_weights_default():
    # ======================================================================
    "Check the correct fit of two signals when one is of very low quality"

    t = np.linspace(0, 5, 300)
    r = np.linspace(2, 6, 90)
    param = [4.5, 0.25]
    P = dd_gauss(r, *param)

    K = dipolarkernel(t, r, mod=0.2)
    scales = [1e3, 1e9]
    V1 = scales[0] * (K @ P + whitegaussnoise(t, 0.001, seed=1))
    V2 = scales[1] * (K @ P + whitegaussnoise(t, 0.1, seed=1))

    Kmodel = lambda lam: [dipolarkernel(t, r, mod=lam)] * 2
    fit = snlls([V1, V2], Kmodel, par0=[0.2], lb=0, ub=1, lbl=np.zeros_like(r))

    assert ovl(P, fit.lin) > 0.93
Example #15
def test_global_weights():
    # ======================================================================
    "Check that the global weights properly work when specified"

    t = np.linspace(0, 5, 300)
    r = np.linspace(2, 8, 150)
    K = dipolarkernel(t, r)

    param1 = [3, 0.2]
    param2 = [5, 0.2]
    P1 = dd_gauss(r, *param1)
    P2 = dd_gauss(r, *param2)
    V1 = K @ P1 + whitegaussnoise(t, 0.01, seed=1)
    V2 = K @ P2 + whitegaussnoise(t, 0.01, seed=1)

    fit1 = snlls([V1, V2], [K, K], lbl=np.zeros_like(r), weights=[1, 1e-10])
    fit2 = snlls([V1, V2], [K, K], lbl=np.zeros_like(r), weights=[1e-10, 1])

    assert ovl(P1, fit1.param) > 0.95 and ovl(P2, fit2.param) > 0.95
Example #16
def test_rescale():
    # ======================================================================
    "Check that the rescale keyword is working"

    N = 10
    t = np.linspace(0, 3, N)
    sig = 1.1234
    noise = whitegaussnoise(t, sig, rescale=True)

    assert np.isclose(np.std(noise), sig)
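A minimal sketch of the behavior this assertion checks (not the library's internal code): with rescale=True the raw noise vector is normalized by its empirical standard deviation and scaled to the requested level, so std(noise) matches sig exactly even for short vectors.

N, sig = 10, 1.1234
raw = np.random.randn(N)
rescaled = raw / np.std(raw) * sig
assert np.isclose(np.std(rescaled), sig)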
Example #17
def test_memory_limit():
    "Check that the memory limit works for too large analyses"
    # Request one million bootstrap samples (approx. 80 GB of memory)
    yfit = np.linspace(0, 1, int(1e4))
    yexp = yfit + whitegaussnoise(yfit, 0.01)

    def bootfcn(y):
        return y

    with pytest.raises(MemoryError):
        paruq = bootstrap_analysis(bootfcn, yexp, yfit, 1e6)
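A back-of-the-envelope estimate of the figure quoted in the comment above, assuming the full matrix of bootstrap samples would be held in memory as float64:

n_samples = int(1e6)       # requested bootstrap samples
n_points = int(1e4)        # length of yfit
bytes_per_float64 = 8
estimated_gb = n_samples * n_points * bytes_per_float64 / 1e9
print(f"~{estimated_gb:.0f} GB")  # ~80 GB, hence the expected MemoryError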
Example #18
def assert_solver(solver):
    #============================================================

    np.random.seed(1)
    t = np.linspace(-2, 4, 300)
    r = np.linspace(2, 6, 100)
    P = dd_gauss(r, 3, 0.2)
    K = dipolarkernel(t, r)
    V = K @ P + whitegaussnoise(t, 0.01)
    fit = snlls(V, K, lbl=np.zeros_like(r), nnlsSolver=solver, uq=False)

    assert ovl(P, fit.param) > 0.95  # more than 95% overlap
Example #19
def test_global_weights_default():
    # ======================================================================
    "Check the correct fit of two signals when one is of very low quality"

    t = np.linspace(0, 5, 300)
    r = np.linspace(2, 6, 90)
    param = [4.5, 0.25]
    P = dd_gauss(r, *param)

    K = dipolarkernel(t, r)
    scales = [1e3, 1e9]
    V1 = scales[0] * K @ P + whitegaussnoise(t, 0.001, seed=1)
    V2 = scales[1] * K @ P + whitegaussnoise(t, 0.1, seed=1)

    par0 = [5, 0.5]
    lb = [1, 0.1]
    ub = [20, 1]
    model = lambda p: [K @ dd_gauss(r, *p)] * 2
    fit = snlls([V1, V2], model, par0, lb, ub, weights=[1, 0])

    assert all(abs(fit.nonlin / param - 1) < 0.03)
Example #20
def test_complex():
    #============================================================
    "Check estimation of noiselevel using a complex signal"

    t = np.linspace(0, 3, 200)
    r = np.linspace(2, 6, 100)
    P = dd_gauss(r, 4, 0.4)
    lam = 0.25
    B = bg_exp(t, 1.5)

    np.random.seed(1)
    noise = whitegaussnoise(t, 0.03)
    np.random.seed(2)
    noisec = 1j * whitegaussnoise(t, 0.03)
    V = dipolarkernel(t, r, mod=lam, bg=B) @ P
    Vco = V * np.exp(-1j * np.pi / 5)
    Vco = Vco + noise + noisec
    truelevel = np.std(noise)
    approxlevel = noiselevel(Vco, 'complex')

    assert abs(truelevel - approxlevel) < 1e-2
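A rough sketch of the idea behind estimating the noise level of a phase-rotated complex signal (an illustration under assumptions, not DeerLab's implementation of noiselevel): rotate the signal by the phase that minimizes its imaginary component, then take the standard deviation of what remains in the imaginary channel, which should be essentially pure noise.

from scipy.optimize import minimize_scalar

def crude_complex_noiselevel(Vc):
    # Phase that minimizes the norm of the imaginary part of the signal
    phi = minimize_scalar(lambda p: np.linalg.norm((Vc * np.exp(1j * p)).imag)).x
    # Treat whatever remains in the imaginary channel as pure noise
    return np.std((Vc * np.exp(1j * phi)).imag)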
Example #21
def test_confinter_scaling():
    #============================================================
    "Check that the confidence intervals are agnostic w.r.t. scaling"

    # Prepare test data
    r = np.linspace(1, 8, 80)
    t = np.linspace(0, 4, 50)
    lam = 0.25
    K = dipolarkernel(t, r, mod=lam)
    parin = [3.5, 0.4, 0.6, 4.5, 0.5, 0.4]
    P = dd_gauss2(r, *parin)
    V = K @ P + whitegaussnoise(t, 0.01, seed=1)
    # Non-linear parameters
    nlpar0 = 0.2
    lb = 0
    ub = 1
    # Linear parameters: non-negativity
    lbl = np.zeros(len(r))
    V0_1 = 1
    V0_2 = 1e8

    # Separable LSQ fit
    fit1 = snlls(V * V0_1,
                 lambda lam: dipolarkernel(t, r, mod=lam),
                 nlpar0,
                 lb,
                 ub,
                 lbl,
                 nonlin_tol=1e-3)
    fit2 = snlls(V * V0_2,
                 lambda lam: dipolarkernel(t, r, mod=lam),
                 nlpar0,
                 lb,
                 ub,
                 lbl,
                 nonlin_tol=1e-3)

    # Assess linear parameter uncertainties
    ci1 = fit1.linUncert.ci(95)
    ci2 = fit2.linUncert.ci(95)
    ci1[ci1 == 0] = 1e-16
    ci2[ci2 == 0] = 1e-16

    assert np.max(abs(ci2 / V0_2 - ci1)) < 1e-6

    # Assess nonlinear parameter uncertainties
    ci1 = fit1.nonlinUncert.ci(95)
    ci2 = fit2.nonlinUncert.ci(95)
    ci1[ci1 == 0] = 1e-16
    ci2[ci2 == 0] = 1e-16

    assert np.max(abs(ci2 - ci1)) < 1e-6
Example #22
def test_complex_values():
    # ======================================================================
    "Check the functionality of the bootstrapping with complex-valued outputs"

    t = np.linspace(0, 5, 200)
    r = np.linspace(2, 6, 300)
    P = dd_gauss(r, 4, 0.8)
    K = dipolarkernel(t, r)
    Vexp = K @ P + whitegaussnoise(t, 0.01, seed=1)
    Vexp = Vexp + 1j * whitegaussnoise(t, 0.01, seed=1)
    par0 = [3, 0.5]
    Vmodel = lambda par: K @ dd_gauss(r, *par) + 1j * np.zeros_like(t)
    fit = snlls(Vexp, Vmodel, par0)
    Vfit = fit.model

    def bootfcn(V):
        fit = snlls(V, Vmodel, par0)
        return fit.nonlin

    paruq = bootstrap_analysis(bootfcn, Vexp, Vfit, 3)

    assert all(abs(paruq.mean - fit.nonlin) < 1.5e-2)
Example #23
def test_confinter_Vfit():
    #============================================================
    "Check that the confidence intervals are correctly for the fitted signal"

    t = np.linspace(-2, 4, 300)
    r = np.linspace(2, 6, 100)
    P = dd_gauss(r, 3, 0.2)
    K = dipolarkernel(t, r, mod=0.2)
    V = K @ P + whitegaussnoise(t, 0.05)

    fit = snlls(V, K, lbl=np.zeros_like(r))

    assert_confidence_intervals(fit.modelUncert, fit.model)
Example #24
def test_global_weights():
    # ======================================================================
    "Check that the global weights properly work when specified"

    t = np.linspace(-0.3, 5, 300)
    r = np.linspace(2, 6, 150)

    P1 = dd_gauss(r, 3, 0.2)
    P2 = dd_gauss(r, 5, 0.2)

    K = dipolarkernel(t, r, mod=0.2)

    scales = [1e3, 1e9]
    sigma1 = 0.001
    V1 = K @ P1 + whitegaussnoise(t, sigma1, seed=1)
    sigma2 = 0.001
    V2 = K @ P2 + whitegaussnoise(t, sigma2, seed=1)

    V1 = scales[0] * V1
    V2 = scales[1] * V2

    Kmodel = lambda lam: [dipolarkernel(t, r, mod=lam)] * 2
    fit1 = snlls([V1, V2],
                 Kmodel,
                 par0=[0.2],
                 lb=0,
                 ub=1,
                 lbl=np.zeros_like(r),
                 weights=[1, 1e-10])
    fit2 = snlls([V1, V2],
                 Kmodel,
                 par0=[0.2],
                 lb=0,
                 ub=1,
                 lbl=np.zeros_like(r),
                 weights=[1e-10, 1])

    assert ovl(P1, fit1.lin) > 0.93 and ovl(P2, fit2.lin) > 0.93
Example #25
def test_globalmodel():
    "Check the profiling works with multiple datasets"

    r = np.linspace(2, 6, 300)
    sigma = 0.1
    modelA = deepcopy(dd_gauss)
    modelB = deepcopy(dd_gauss)
    model = merge(modelA, modelB)
    model = link(model,
                 mean=['mean_1', 'mean_2'],
                 width=['width_1', 'width_2'])
    y = model(r, r, mean=3, width=0.2, scale_1=1, scale_2=1)
    y[0] += whitegaussnoise(r, sigma, seed=1)
    y[1] += whitegaussnoise(r, sigma, seed=1)

    profuq = profile_analysis(model, y, r, r, samples=3, noiselvl=sigma)

    x, pdf = profuq['mean'].pardist()
    mean_mean = x[np.argmax(pdf)]
    x, pdf = profuq['width'].pardist()
    width_mean = x[np.argmax(pdf)]

    assert np.allclose([mean_mean, width_mean], [3, 0.2], rtol=1e-2)
Example #26
def test_cost_value():
    #============================================================
    "Check that the cost value is properly returned"

    np.random.seed(1)
    t = np.linspace(-2, 4, 300)
    r = np.linspace(2, 6, 100)
    P = dd_gauss(r, 3, 0.2)
    K = dipolarkernel(t, r)
    V = K @ P + whitegaussnoise(t, 0.01)
    fit = snlls(V, K, lbl=np.zeros_like(r))

    assert isinstance(fit.cost, float) and np.round(
        fit.cost / np.sum(fit.residuals**2), 5) == 1
Example #27
def test_tikh_with_noise():
    #============================================================
    "Check the Tikhonov regularization method with noise"

    np.random.seed(1)
    t = np.linspace(0, 3, 200)
    r = np.linspace(1, 5, 100)
    P = dd_gauss(r, 3, 0.08)
    K = dipolarkernel(t, r)
    V = K @ P + whitegaussnoise(t, 0.01)

    fit = snlls(V, K, lbl=np.zeros_like(r))

    assert ovl(P, fit.param) > 0.95  # more than 95% overlap
Example #28
def test_global_weights():
    # ======================================================================
    "Check that the global weights properly work when specified"

    t = np.linspace(0, 5, 300)
    r = np.linspace(2, 8, 600)
    K = dipolarkernel(t, r)

    param1 = [3, 0.2]
    param2 = [5, 0.2]
    P1 = dd_gauss(r, *param1)
    P2 = dd_gauss(r, *param2)
    V1 = K @ P1 + whitegaussnoise(t, 0.01, seed=1)
    V2 = K @ P2 + whitegaussnoise(t, 0.01, seed=1)

    par0 = [5, 0.5]
    lb = [1, 0.1]
    ub = [20, 1]
    model = lambda p: [K @ dd_gauss(r, *p)] * 2
    fit1 = snlls([V1, V2], model, par0, lb, ub, weights=[1, 1e-10])
    fit2 = snlls([V1, V2], model, par0, lb, ub, weights=[1e-10, 1])

    assert all(abs(fit1.nonlin / param1 - 1) < 0.03) and all(
        abs(fit2.nonlin / param2 - 1) < 0.03)
Example #29
def test_basics():
    "Check the basic functionality of the profiling"

    r = np.linspace(2, 6, 300)
    sigma = 0.1
    model = dd_gauss
    y = model(r, mean=3, width=0.2) + whitegaussnoise(r, sigma, seed=1)

    profuq = profile_analysis(model, y, r, samples=3, noiselvl=sigma)

    x, pdf = profuq['mean'].pardist()
    mean_mean = x[np.argmax(pdf)]
    x, pdf = profuq['width'].pardist()
    width_mean = x[np.argmax(pdf)]

    assert np.allclose([mean_mean, width_mean], [3, 0.2], rtol=1e-2)
Example #30
def test_goodness_of_fit():
    # ======================================================================
    "Check the goodness-of-fit statistics are correct even with arbitrary scaling"

    t = np.linspace(0, 3, 300)
    r = np.linspace(2, 5, 200)
    P = dd_gauss(r, 3, 0.2)
    K = dipolarkernel(t, r)
    sigma = 0.03
    V = K @ P + whitegaussnoise(t, sigma, seed=1, rescale=True)

    par0 = [5, 0.5]
    model = lambda p: K @ dd_gauss(r, *p)
    fit = snlls(V, model, par0, lb=[1, 0.1], ub=[20, 1], noiselvl=sigma)

    assert abs(fit.stats['chi2red'] - 1) < 0.05