Example no. 1
0
def test_algorithms():
    #=======================================================================
    "Check that the values returned by the grid and Brent algorithms coincide"

    t = np.linspace(0, 5, 80)
    r = np.linspace(2, 5, 80)
    P = dd_gauss(r, 3, 0.2)
    K = dipolarkernel(t, r)
    L = regoperator(r, 2)
    # Noisy synthetic signal (fixed seed for reproducibility)
    V = K @ P + whitegaussnoise(t, 0.02, seed=1)

    # Run the selection with both search algorithms on the same problem
    shared = dict(method='aic', regop=L)
    alpha_grid = selregparam(V, K, cvxnnls, algorithm='grid', **shared)
    alpha_brent = selregparam(V, K, cvxnnls, algorithm='brent', **shared)

    # Both algorithms must agree to within 15%
    assert abs(1 - alpha_grid / alpha_brent) < 0.15
Example no. 2
0
def assert_full_output(method):
    """Check the consistency of ``selregparam``'s full output.

    Builds a noiseless synthetic dipolar signal, runs the regularization
    parameter selection with ``full_output=True`` using the given search
    algorithm, and asserts internal consistency of all returned quantities.

    Parameters
    ----------
    method : str
        Search algorithm passed to ``selregparam`` (e.g. ``'grid'`` or
        ``'brent'``).

    Raises
    ------
    AssertionError
        If any consistency check on the full output fails; the message
        lists every failed check.
    """
    t = np.linspace(0, 5, 80)
    r = np.linspace(2, 5, 80)
    P = dd_gauss(r, 3, 0.4)
    K = dipolarkernel(t, r)
    L = regoperator(r, 2)
    V = K @ P

    alpha, alphas_evaled, functional, residuals, penalties = selregparam(
        V,
        K,
        cvxnnls,
        method='aic',
        algorithm=method,
        full_output=True,
        regop=L)

    # Collect all failures so a single run reports every broken invariant
    errors = []
    if np.size(alpha) != 1:
        errors.append("alphaopt is not a scalar")
    if len(functional) != len(alphas_evaled):
        errors.append(
            "The number of elements of functional values and evaluated alphas are different."
        )
    if len(residuals) != len(penalties):
        errors.append(
            # Fixed typo in message: "evluated" -> "evaluated"
            "The number of elements of evaluated residuals and penalties are different"
        )
    # Idiomatic membership test (was: `not alpha in ...`)
    if alpha not in alphas_evaled:
        errors.append("The optimal alpha is not part of the evaluated alphas")
    assert not errors, f"Errors occurred:\n{chr(10).join(errors)}"
Example no. 3
0
def test_manual_candidates():
    #=======================================================================
    "Check that the alpha-search range can be manually passed"

    t = np.linspace(0, 5, 80)
    r = np.linspace(2, 5, 80)
    P = dd_gauss(r, 3, 0.15)
    K = dipolarkernel(t, r)
    L = regoperator(r, 2, includeedges=True)
    candidate_alphas = np.linspace(-8, 2, 60)
    V = K @ P

    # Selection restricted to the manually supplied candidate list
    manual = selregparam(V, K, cvxnnls, method='aic',
                         candidates=candidate_alphas, regop=L)
    # Selection over the automatically determined range
    auto = selregparam(V, K, cvxnnls, method='aic', regop=L)

    # Both searches should land on (numerically) the same optimum
    assert abs(np.log10(manual) - np.log10(auto)) < 1e-4
Example no. 4
0
def test_compensate_condition():
    #=======================================================================
    "Check that alpha compensates for larger condition numbers"

    r = np.linspace(2, 6, 100)
    P = dd_gauss(r, 3, 0.2)

    def alpha_for(npoints):
        # Build a noiseless signal on a time axis with the given sampling
        # and select the regularization parameter for it
        t = np.linspace(0, 3, npoints)
        K = dipolarkernel(t, r)
        return selregparam(K @ P, K, cvxnnls, method='aic')

    # Lower condition number (coarser time axis)
    alpha_low = alpha_for(200)
    # Larger condition number (denser time axis)
    alpha_high = alpha_for(400)

    # The worse-conditioned problem must receive stronger regularization
    assert alpha_high > alpha_low
Example no. 5
0
def get_alpha_from_method(method):
    """Select the regularization parameter with the given criterion on a
    fixed noiseless test problem and return its base-10 logarithm."""
    axis_t = np.linspace(0, 5, 500)
    axis_r = np.linspace(2, 5, 80)
    distribution = dd_gauss(axis_r, 3.0, 0.16986436005760383)
    kernel = dipolarkernel(axis_t, axis_r)
    regmatrix = regoperator(axis_r, 2, includeedges=True)
    signal = kernel @ distribution

    selected = selregparam(signal, kernel, cvxnnls, method=method,
                           noiselvl=0, regop=regmatrix)
    return np.log10(selected)
Example no. 6
0
def test_nonuniform_r():
    #=======================================================================
    "Check the value returned when using a non-uniform distance axis"

    t = np.linspace(0, 3, 200)
    # Distance axis with non-uniform (square-root) spacing
    r = np.sqrt(np.linspace(1, 7**2, 200))
    P = dd_gauss(r, 3, 0.2)
    K = dipolarkernel(t, r)
    L = regoperator(r, 2)
    V = K @ P

    alpha = selregparam(V, K, cvxnnls, method='aic', regop=L)
    logalpha = np.log10(alpha)
    logalpharef = -6.8517

    # Must reproduce the reference value to within 20%
    assert abs(1 - logalpha / logalpharef) < 0.2
Example no. 7
0
    def linear_problem(y, A, optimize_alpha, alpha):
        #===========================================================================
        """
        Linear problem
        ------------------
        Solves the linear subproblem of the SNLLS objective function via linear LSQ 
        constrained, unconstrained, or regularized.

        Parameters
        ----------
        y : array_like
            Data vector of the linear subproblem.
        A : array_like
            Design matrix of the linear subproblem.
        optimize_alpha : bool
            If True, re-select the regularization parameter before solving.
        alpha : float
            Regularization parameter used when ``optimize_alpha`` is False.

        Returns
        -------
        xfit, alpha, Ndof
            Fitted linear parameters (with frozen values re-inserted), the
            regularization parameter actually used, and the estimated
            number of degrees of freedom.

        Notes
        -----
        This is a closure: it relies on names from the enclosing scope
        (``Nlin_notfrozen``, ``lin_frozen``, ``lin_parfrozen``, ``mask``,
        ``weights``, ``L``, ``regparam``, ``regparamrange``, ``noiselvl``,
        ``linSolver``, ``parseResult``, ``validateResult``,
        ``_lsqcomponents``, ``_insertfrozen``) defined outside this view.
        """

        # If all linear parameters are frozen, do not optimize
        if Nlin_notfrozen == 0:
            return lin_parfrozen.astype(float), None, 0

        # Remove columns corresponding to frozen linear parameters
        Ared = A[:, ~lin_frozen]
        # Frozen component of the model response
        yfrozen = (A[:, lin_frozen] @ lin_parfrozen[lin_frozen]).astype(float)

        # Optimize the regularization parameter only if needed
        if optimize_alpha:
            alpha = dl.selregparam((y - yfrozen)[mask],
                                   Ared[mask, :],
                                   linSolver,
                                   regparam,
                                   weights=weights[mask],
                                   regop=L,
                                   candidates=regparamrange,
                                   noiselvl=noiselvl,
                                   searchrange=regparamrange)

        # Components for linear least-squares
        AtA, Aty = _lsqcomponents((y - yfrozen)[mask],
                                  Ared[mask, :],
                                  L,
                                  alpha,
                                  weights=weights[mask])

        # NOTE(review): Ared @ pinv(AtA) is non-square whenever the masked
        # data length differs from the parameter count; np.trace then sums
        # only the main diagonal. Verify this is the intended dof estimate.
        Ndof = np.maximum(0, np.trace(Ared @ np.linalg.pinv(AtA)))

        # Solve the linear least-squares problem
        result = linSolver(AtA, Aty)
        result = parseResult(result)
        xfit = validateResult(result)

        # Insert back the frozen linear parameters
        xfit = _insertfrozen(xfit, lin_parfrozen, lin_frozen)

        return xfit, alpha, Ndof
Example no. 8
0
def test_unconstrained():
    #=======================================================================
    "Check the algorithm works with unconstrained distributions"

    t = np.linspace(0, 5, 80)
    r = np.linspace(2, 5, 80)
    P = dd_gauss(r, 3, 0.15)
    K = dipolarkernel(t, r)
    L = regoperator(r, 2, includeedges=True)
    V = K @ P

    # Use an unconstrained linear solver instead of a non-negative one
    alpha = selregparam(V, K, np.linalg.solve, method='aic', regop=L)
    logalpha = np.log10(alpha)
    logalpharef = -8.87

    # Within 10% of the reference value
    assert abs(1 - logalpha / logalpharef) < 0.1
Example no. 9
0
def test_tikh_value():
    #=======================================================================
    "Check the value returned by Tikhonov regularization"

    np.random.seed(1)
    t = np.linspace(0, 5, 500)
    r = np.linspace(2, 5, 80)
    P = dd_gauss(r, 3, 0.15)
    K = dipolarkernel(t, r)
    V = K @ P + whitegaussnoise(t, 0.01)
    L = regoperator(r, 2, includeedges=True)

    loga = np.log10(selregparam(V, K, cvxnnls, method='aic', regop=L))
    logaref = -3.51  # Computed with DeerLab-Matlab (0.9.2)

    # Less than 2% relative deviation from the reference
    assert abs(1 - loga / logaref) < 0.02