Example #1
import numpy as np


def unconstrained_analysis_pursuit(measurements,
                                   acqumatrix,
                                   operator,
                                   lambda1,
                                   lambda2,
                                   OmegaPinv=None,
                                   P=None):

    gammasize, signalsize = operator.shape
    gamma = np.zeros(gammasize)
    Lambdahat = np.arange(gammasize)
    if OmegaPinv is None:
        OmegaPinv = np.linalg.pinv(operator)
    if P is None:
        U, S, Vt = np.linalg.svd(OmegaPinv)
        P = Vt[-(gammasize - signalsize):, :]

    while True:  # exit from inside with break

        # Minimization problem
        I_Lambda_k = np.zeros((Lambdahat.size, gammasize))
        I_Lambda_k[range(Lambdahat.size), Lambdahat] = 1
        system_matrix = np.concatenate(
            (np.dot(acqumatrix, OmegaPinv), lambda1 * I_Lambda_k))
        system_matrix = np.concatenate((system_matrix, lambda2 * P))
        y_tilde = np.concatenate(
            (measurements, np.zeros(I_Lambda_k.shape[0] + P.shape[0])))
        # solve
        #gamma = np.linalg.lstsq(system_matrix, y_tilde)[0]
        # Use fast version
        gamma = fast_lstsq(system_matrix, y_tilde)

        # Atom selection
        abs_gamma = np.absolute(gamma[Lambdahat])
        maxval = np.amax(abs_gamma)
        maxrow = Lambdahat[np.argmax(abs_gamma)]

        # Scale with GAP or OMP criterion
        #  alternatively: max(maxval, lambda1*maxval)
        if lambda1 > 1:
            # lambda1**2 instead of lambda1 in order to match OMP
            maxval = (lambda1**2) * maxval
        # Exit condition
        if maxval < 1e-6:
            break

        # Remove selected rows
        Lambdahat = np.setdiff1d(Lambdahat, maxrow)

        # Another exit condition
        if Lambdahat.size == 0:
            break

    # Debias (project gamma onto columns of operator)
    gamma = np.dot(np.dot(operator, OmegaPinv), gamma)

    return np.dot(OmegaPinv, gamma)
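
Note: every example on this page calls a fast_lstsq helper that is defined elsewhere in the module and not shown here. Based only on the comment in Example #3 ("use QR and a triangular solve instead of lstsq()"), a minimal sketch of such a helper, under the assumption that the system matrix is tall with full column rank, could look like this:

# Hypothetical sketch; the module's real implementation may differ.
import numpy as np
import scipy.linalg


def fast_lstsq(A, b):
    # Least-squares solve via economic QR; assumes A is tall (rows >= cols)
    # and has full column rank.
    Q, R = scipy.linalg.qr(A, mode='economic')
    return scipy.linalg.solve_triangular(R, np.dot(Q.T, b))

For rank-deficient or wide systems this QR route breaks down; the commented-out np.linalg.lstsq call kept in the examples is the safer fallback there.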
Example #2
import math

import numpy as np


def _tst_recommended(X, Y, nsweep=300, tol=0.00001, xinitial=None, ro=None):

    colnorm = np.mean(np.sqrt((X**2).sum(0)))
    X = X / colnorm
    Y = Y / colnorm
    n, p = X.shape
    delta = float(n) / p

    if xinitial is None:
        xinitial = np.zeros(p)
    if ro is None:
        ro = 0.044417 * delta**2 + 0.34142 * delta + 0.14844

    k1 = int(math.floor(ro * n))
    k2 = int(math.floor(ro * n))

    #initialization
    x1 = xinitial.copy()
    I = []

    for sweep in np.arange(nsweep):
        r = Y - np.dot(X, x1)
        c = np.dot(X.T, r)
        i_csort = np.argsort(np.abs(c))
        I = np.union1d(I, i_csort[-k2:])

        # Make sure X[:, np.int_(I)] is a 2-dimensional matrix even if I
        # holds a single index (and would therefore yield a column vector)
        if I.size == 1:
            a = np.reshape(X[:, np.int_(I)], (X.shape[0], 1))
        else:
            a = X[:, np.int_(I)]
        #xt = np.linalg.lstsq(a, Y)[0]
        # Use fast version
        xt = fast_lstsq(a, Y)
        i_xtsort = np.argsort(np.abs(xt))

        J = I[i_xtsort[-k1:]]
        x1 = np.zeros(p)
        x1[np.int_(J)] = xt[i_xtsort[-k1:]]
        I = J.copy()
        if np.linalg.norm(Y - np.dot(X, x1)) / np.linalg.norm(Y) < tol:
            break

    return x1.copy()
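
A minimal usage sketch for the routine above on synthetic noiseless data, assuming the fast_lstsq helper sketched under Example #1; the sizes, seed, and sparsity level are illustrative assumptions, not values from the source:

import numpy as np

np.random.seed(0)
n, p, k = 100, 200, 5                      # measurements, dictionary size, sparsity
X = np.random.randn(n, p)
x0 = np.zeros(p)
x0[np.random.choice(p, k, replace=False)] = np.random.randn(k)
Y = np.dot(X, x0)

xhat = _tst_recommended(X, Y)              # recommended ro is derived from n/p
print(np.linalg.norm(xhat - x0))           # should be small on easy instances

The loop terminates either when the relative residual drops below tol or after nsweep sweeps, so the call returns regardless of whether recovery succeeds.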
Example #3
import math

import numpy as np


def ArgminOperL2Constrained(y, M, MH, Omega, OmegaH, Lambdahat, xinit,
                            ilagmult, params):

    # This function computes
    #    xhat = argmin || Omega(Lambdahat, :) * x ||_2   subject to  || y - M*x ||_2 <= epsilon.
    # arepr is the analysis representation corresponding to Lambdahat, i.e.,
    #    arepr = Omega(Lambdahat, :) * xhat.
    # The function also returns the Lagrange multiplier used in the process of computing xhat.
    #
    # Inputs:
    #    y : observations/measurements of an unknown vector x0; equal to M*x0 + noise.
    #    M : measurement matrix
    #    MH : M', the conjugate transpose of M
    #    Omega : analysis operator
    #    OmegaH : Omega', the conjugate transpose of Omega; also the synthesis operator.
    #    Lambdahat : an index set indicating some rows of Omega.
    #    xinit : initial estimate used by the conjugate gradient algorithm.
    #    ilagmult : initial Lagrange multiplier.
    #    params : parameters
    #        params['noise_level'] : corresponds to epsilon above.
    #        params['max_inner_iteration'] : maximum number of iterations of the conjugate gradient method.
    #        params['l2_accuracy'] : the l2 accuracy parameter used in the conjugate gradient method.
    #        params['l2solver'] : if 'pseudoinverse', direct matrix computation (not the conjugate gradient method) is used; otherwise the conjugate gradient method is used.

    lagmultmax = 1e5
    lagmultmin = 1e-4
    lagmultfactor = 2.0
    accuracy_adjustment_exponent = 4 / 5.
    lagmult = max(min(ilagmult, lagmultmax), lagmultmin)
    was_infeasible = False
    was_feasible = False

    #######################################################################
    ## Direct matrix computation, ported from MATLAB (no conjugate gradient method).
    #######################################################################
    if params['l2solver'] == 'pseudoinverse':
        while True:
            alpha = math.sqrt(lagmult)

            # Build augmented matrix and measurements vector
            Omega_tilde = np.concatenate((M, alpha * Omega[Lambdahat, :]))
            y_tilde = np.concatenate((y, np.zeros(Lambdahat.size)))

            # Solve the least-squares problem
            # FAST: use QR and a triangular solve instead of lstsq()
            #xhat = np.linalg.lstsq(Omega_tilde, y_tilde)[0]
            xhat = fast_lstsq(Omega_tilde, y_tilde)

            # Check whether the residual is below the required tolerance,
            # and adjust the Lagrange multiplier accordingly
            temp = np.linalg.norm(y - np.dot(M, xhat), 2)
            if temp <= params['noise_level']:
                was_feasible = True
                if was_infeasible:
                    break
                lagmult = lagmult * lagmultfactor
            else:
                was_infeasible = True
                if was_feasible:
                    xhat = xprev.copy()
                    break
                lagmult = lagmult / lagmultfactor
            if lagmult < lagmultmin or lagmult > lagmultmax:
                break
            xprev = xhat.copy()

        arepr = np.dot(Omega[Lambdahat, :], xhat)
        return xhat, arepr, lagmult

    ########################################################################
    ## Computation using conjugate gradient method.
    ########################################################################
    if hasattr(MH, '__call__'):
        b = MH(y)
    else:
        b = np.dot(MH, y)

    norm_b = np.linalg.norm(b, 2)
    xhat = xinit.copy()
    xprev = xinit.copy()
    residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH, Lambdahat,
                                  lagmult) - b
    direction = -residual
    it = 0

    while it < params['max_inner_iteration']:
        it = it + 1
        alpha = np.linalg.norm(residual, 2)**2 / np.dot(
            direction.T,
            TheHermitianMatrix(direction, M, MH, Omega, OmegaH, Lambdahat,
                               lagmult))
        xhat = xhat + alpha * direction
        prev_residual = residual.copy()
        residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH, Lambdahat,
                                      lagmult) - b
        beta = np.linalg.norm(residual, 2)**2 / np.linalg.norm(
            prev_residual, 2)**2
        direction = -residual + beta * direction

        if (np.linalg.norm(residual, 2) / norm_b <
                params['l2_accuracy'] * lagmult**accuracy_adjustment_exponent
                or it == params['max_inner_iteration']):
            if hasattr(M, '__call__'):
                temp = np.linalg.norm(y - M(xhat), 2)
            else:
                temp = np.linalg.norm(y - np.dot(M, xhat), 2)

            # Weighted analysis-coefficient norm (not used further below)
            if hasattr(Omega, '__call__'):
                u = Omega(xhat)
                u = math.sqrt(lagmult) * np.linalg.norm(u[Lambdahat], 2)
            else:
                u = math.sqrt(lagmult) * np.linalg.norm(
                    np.dot(Omega[Lambdahat, :], xhat), 2)

            if temp <= params['noise_level']:
                was_feasible = True
                if was_infeasible:
                    break
                lagmult = lagmultfactor * lagmult
                residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH,
                                              Lambdahat, lagmult) - b
                direction = -residual
                it = 0
            else:
                lagmult = lagmult / lagmultfactor
                if was_feasible:
                    xhat = xprev.copy()
                    break
                was_infeasible = True
                residual = TheHermitianMatrix(xhat, M, MH, Omega, OmegaH,
                                              Lambdahat, lagmult) - b
                direction = -residual
                it = 0
            if lagmult > lagmultmax or lagmult < lagmultmin:
                break
            xprev = xhat.copy()

    print('fidelity_error =', temp)

    ##
    # Compute analysis representation for xhat
    ##
    if hasattr(Omega, '__call__'):
        temp = Omega(xhat)
        arepr = temp[Lambdahat]
    else:  ## here Omega is assumed to be a matrix
        arepr = np.dot(Omega[Lambdahat, :], xhat)

    return xhat, arepr, lagmult
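
A minimal usage sketch exercising the direct 'pseudoinverse' branch above (the only branch that does not need the module's TheHermitianMatrix helper), assuming the fast_lstsq helper sketched under Example #1; all sizes and parameter values are illustrative assumptions:

import numpy as np

np.random.seed(0)
n, m, p = 50, 30, 70                       # signal size, measurements, operator rows
M = np.random.randn(m, n)
Omega = np.random.randn(p, n)
y = np.dot(M, np.random.randn(n))

params = {'l2solver': 'pseudoinverse',
          'noise_level': 0.01 * np.linalg.norm(y),
          'max_inner_iteration': 100,      # only used by the CG branch
          'l2_accuracy': 1e-6}             # only used by the CG branch
xhat, arepr, lagmult = ArgminOperL2Constrained(
    y, M, M.T, Omega, Omega.T, np.arange(p), np.zeros(n), 1.0, params)

The search over the Lagrange multiplier always terminates: lagmult is doubled or halved until the feasible/infeasible flags flip, or until it leaves the [lagmultmin, lagmultmax] range.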