Example #1
 def __init__(self, basef, 
              translate=True, 
              rotate=False, 
              conditioning=None, 
              asymmetry=None,
              oscillate=False, 
              penalize=None,
              ):
     FunctionEnvironment.__init__(self, basef.xdim, basef.xopt)
     self.desiredValue = basef.desiredValue            
     self.toBeMinimized = basef.toBeMinimized
     
     if translate:            
         self.xopt = (rand(self.xdim) - 0.5) * 9.8
         
     self._diags = eye(self.xdim)            
     self._R = eye(self.xdim)            
     self._Q = eye(self.xdim)            
     
     if conditioning is not None:
         self._diags = generateDiags(conditioning, self.xdim)
     if rotate:
         self._R = orth(rand(basef.xdim, basef.xdim))        
         if conditioning:
             self._Q = orth(rand(basef.xdim, basef.xdim))
                     
     tmp = lambda x: dot(self._Q, dot(self._diags, dot(self._R, x-self.xopt)))
     if asymmetry is not None:
         tmp2 = tmp
         tmp = lambda x: asymmetrify(tmp2(x), asymmetry)
     if oscillate:
         tmp3 = tmp
         tmp = lambda x: oscillatify(tmp3(x))
     
     self.f = lambda x: basef.f(tmp(x))
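A note on the tmp/tmp2/tmp3 chaining above: capturing the previous lambda under a fresh name before rebinding tmp is what avoids infinite recursion, because Python closures resolve names at call time. A minimal standalone sketch of the same pattern (the names here are illustrative, not from the original project):

def compose_demo():
    f = lambda x: x + 1
    g = f                    # capture the old callable under a new name
    f = lambda x: 2 * g(x)   # safe: g stays bound to the old lambda
    # writing f = lambda x: 2 * f(x) instead would recurse forever at call time
    return f(3)              # 2 * (3 + 1) == 8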
Example #2
def subspace(a, b, deg=True):
    """
    Angle between two subspaces specified by the columns of a and b
    Ported from MATLAB 'subspace' function

    Parameters
    ----------
    a : matrix
    b : matrix
    deg : bool
        If True, return the angle in degrees, otherwise in radians

    Returns
    -------
    double
        angle
    """
    warnings.warn(
        "Deprecated. Use scipy.linalg.subspace_angles instead.", FutureWarning
    )
    oa = linalg.orth(a)
    ob = linalg.orth(b)
    if oa.shape[1] < ob.shape[1]:
        oa, ob = ob.copy(), oa.copy()
    ob -= oa @ (oa.T @ ob)
    rad = np.arcsin(min(1, linalg.norm(ob, ord=2)))
    return np.degrees(rad) if deg else rad
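As a sanity check, this port can be compared against the SciPy replacement named in the deprecation warning; like MATLAB's subspace, it returns the largest principal angle, which scipy.linalg.subspace_angles lists first (it returns all angles in descending order). A minimal sketch, assuming the function above is in scope:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
a = rng.standard_normal((6, 2))
b = rng.standard_normal((6, 3))
angle = subspace(a, b, deg=False)
# the largest principal angle should agree with SciPy's first entry
assert np.isclose(angle, linalg.subspace_angles(a, b)[0])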
Example #3
def principal_angles(A, B):
    '''Compute the principal angles between subspaces A and B.

    The algorithm for computing the principal angles is described in :
    A. V. Knyazev and M. E. Argentati,
    Principal Angles between Subspaces in an A-Based Scalar Product: 
    Algorithms and Perturbation Estimates. SIAM Journal on Scientific Computing, 
    23 (2002), no. 6, 2009-2041.
    http://epubs.siam.org/sam-bin/dbq/article/37733
    '''    
    # eps = np.finfo(np.float64).eps**.981
    # for i in range(A.shape[1]):
    #     normi = la.norm(A[:,i],np.inf)
    #     if normi > eps: A[:,i] = A[:,i]/normi
    # for i in range(B.shape[1]):
    #     normi = la.norm(B[:,i],np.inf)
    #     if normi > eps: B[:,i] = B[:,i]/normi
    QA = sl.orth(A)
    QB = sl.orth(B)
    _, s, Zs = svd(QA.T.dot(QB), full_matrices=False)
    s = np.minimum(s, ones_like(s))
    theta = np.maximum(np.arccos(s), np.zeros_like(s))
    V = QB.dot(Zs)
    idxSmall = s > np.sqrt(2.)/2.
    if np.any(idxSmall):
        RB = V[:,idxSmall]
        _, x, _ = svd(RB-QA.dot(QA.T.dot(RB)),full_matrices=False)
        thetaSmall = np.flipud(np.maximum(arcsin(np.minimum(x, ones_like(x))), zeros_like(x)))
        theta[idxSmall] = thetaSmall
    return theta
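The idxSmall branch above exists because arccos loses precision for angles near zero, so small angles are recomputed via arcsin. The result can be cross-checked against scipy.linalg.subspace_angles, which implements the same Knyazev-Argentati approach but returns angles in descending rather than ascending order; a hedged sketch, assuming principal_angles is in scope:

import numpy as np
import scipy.linalg as sl

rng = np.random.default_rng(1)
A = rng.standard_normal((8, 3))
B = rng.standard_normal((8, 3))
theta = principal_angles(A, B)            # ascending order
ref = np.sort(sl.subspace_angles(A, B))   # SciPy returns descending order
assert np.allclose(theta, ref)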
Example #4
 def __init__(self, *args, **kwargs):
     MultiModalFunction.__init__(self, *args, **kwargs)
     self._mu0 = 2.5
     self._s = 1 - 1 / (2 * sqrt(self.xdim + 20) - 8.2)
     self._mu1 = -sqrt((self._mu0 ** 2 - 1) / self._s)
     self._signs = sign(randn(self.xdim))
     self._R1 = orth(rand(self.xdim, self.xdim))
     self._R2 = orth(rand(self.xdim, self.xdim))
     self._diags = generateDiags(100, self.xdim)
Example #5
def fubini_study(A, B):
    '''
    fubini_study(A, B) Compute the Fubini-Study distance
    Compute the Fubini-Study distance based on the principal angles between A and B
    as d = \arccos(\prod_i \cos\theta_i)
    '''
    if A.shape != B.shape:
        raise ValueError('Atoms have different dim (', A.shape, ' and ', B.shape,'). Error raised in fubini_study(A, B)')
    if np.allclose(A, B): return 0.
    return arccos(det(sl.orth(A).T.dot(sl.orth(B))))
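Two deterministic sanity checks: identical inputs hit the allclose shortcut, and orthogonal spans give a zero cross-Gram determinant, hence a distance of pi/2. A minimal sketch, assuming fubini_study and its module imports are in scope:

import numpy as np

A = np.eye(6)[:, :2]         # span{e1, e2}
B = np.eye(6)[:, 2:4]        # span{e3, e4}, orthogonal to A
print(fubini_study(A, A))    # 0.0 via the allclose shortcut
print(fubini_study(A, B))    # pi/2, since det of the zero cross-Gram matrix is 0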
Example #6
    def _create_SDP(self):
        """ Creates the SDP knockoff of X"""
 
        # Check for rank deficiency (will add later).
 
        # SVD and come up with perpendicular matrix
        U, d, V = nplin.svd(self.X,full_matrices=True) 
        d[d<0] = 0
        U_perp = U[:,self.p:(2*self.p)]
        if self.randomize:
            U_perp = np.dot(U_perp,splin.orth(npran.randn(self.p,self.p)))
 
        # Compute the Gram matrix and its (pseudo)inverse.
        G     = np.dot(V.T * d**2 ,V)
        G_inv = np.dot(V.T * d**-2,V)
 
        # Optimize the parameter s of Equation 1.3 using SDP.
        self.s = solve_sdp(G)
        self.s[self.s <= self.zerotol] = 0
 
        # Construct the knockoff according to Equation 1.4:
        C_U,C_d,C_V = nplin.svd(2*np.diag(self.s) - (self.s * G_inv.T).T * self.s)
        C_d[C_d < 0] = 0
        X_ko = self.X - np.dot(self.X,G_inv*self.s) + np.dot(U_perp*np.sqrt(C_d),C_V)
        self.X_lrg = np.concatenate((self.X,X_ko), axis=1)
Example #7
def bench_lobpcg_mikota():
    print()
    print('                 lobpcg benchmark using mikota pairs')
    print('==============================================================')
    print('      shape      | blocksize |    operation   |   time   ')
    print('                                              | (seconds)')
    print('--------------------------------------------------------------')
    fmt = ' %15s |   %3d     |     %6s     | %6.2f '

    m = 10
    for n in 128, 256, 512, 1024, 2048:
        shape = (n, n)
        A, B = _mikota_pair(n)
        desired_evs = np.square(np.arange(1, m+1))

        tt = time.perf_counter()
        X = rand(n, m)
        X = orth(X)
        LorU, lower = cho_factor(A, lower=0, overwrite_a=0)
        M = LinearOperator(shape,
                matvec=partial(_precond, LorU, lower),
                matmat=partial(_precond, LorU, lower))
        eigs, vecs = lobpcg(A, X, B, M, tol=1e-4, maxiter=40)
        eigs = sorted(eigs)
        elapsed = time.perf_counter() - tt
        assert_allclose(eigs, desired_evs)
        print(fmt % (shape, m, 'lobpcg', elapsed))

        tt = time.perf_counter()
        w = eigh(A, B, eigvals_only=True, eigvals=(0, m-1))
        elapsed = time.perf_counter() - tt
        assert_allclose(w, desired_evs)
        print(fmt % (shape, m, 'eigh', elapsed))
Example #8
def chordal(A, B):
    '''
    chordal(A, B) Compute the chordal distance
    Compute the chordal distance between A and B
    as d=\sqrt{K - ||\bar{A}^T\bar{B}||_F^2}
    where K is the rank of A and B, || . ||_F is the Frobenius norm,
    \bar{A} is the orthogonal basis associated with A and the same goes for B.
    '''
    if A.shape != B.shape:
        raise ValueError('Atoms do not have the same dimension (', A.shape, ' and ', B.shape, '). Error raised in chordal(A, B)')
    
    if np.allclose(A, B): return 0.
    else: 
        d2 = A.shape[1] - norm(sl.orth(A).T.dot(sl.orth(B)), 'fro')**2
        if d2 < 0.: return sqrt(abs(d2))
        else: return sqrt(d2)
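The quantity under the square root equals the sum of sin^2 of the principal angles, so chordal can be cross-checked against scipy.linalg.subspace_angles; a minimal sketch, assuming chordal is in scope:

import numpy as np
import scipy.linalg as sl

rng = np.random.default_rng(3)
A = rng.standard_normal((10, 3))
B = rng.standard_normal((10, 3))
d = chordal(A, B)
ref = np.linalg.norm(np.sin(sl.subspace_angles(A, B)))
assert np.isclose(d, ref)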
Example #9
def  addblock_svd_update( Uarg, Sarg, Varg, Aarg, force_orth = False):
  U = Varg
  V = Uarg
  S = np.eye(len(Sarg),len(Sarg))*Sarg
  A = Aarg.T
  
  current_rank = U.shape[1]
  m = np.dot(U.T,A)
  p = A - np.dot(U,m)
  P = lin.orth(p)
  Ra = np.dot(P.T,p)
  z = np.zeros(m.shape)
  K = np.vstack(( np.hstack((S,m)), np.hstack((z.T,Ra)) ))
  tUp, tSp, tVp = lin.svd(K)
  tUp = tUp[:,:current_rank]
  tSp = np.diag(tSp[:current_rank])
  tVp = tVp[:,:current_rank]
  Sp = tSp
  Up = np.dot(np.hstack((U,P)),tUp)
  Vp = np.dot(V,tVp[:current_rank,:])
  Vp = np.vstack((Vp, tVp[current_rank:tVp.shape[0], :]))
  
  if force_orth:
    UQ,UR = lin.qr(Up,mode='economic')
    VQ,VR = lin.qr(Vp,mode='economic')
    tUp, tSp, tVp = lin.svd(np.dot(np.dot(UR, Sp), VR.T))
    tSp = np.diag(tSp)
    Up = np.dot(UQ, tUp)
    Vp = np.dot(VQ, tVp)
    Sp = tSp

  Up1 = Vp
  Vp1 = Up
    
  return Up1,Sp,Vp1
Example #10
def fastica_defl(X, nIC=None, guess=None,
             nonlinfn = pow3nonlin,
             termtol = 5e-7, maxiters = 2e3):
    nPC, siglen = X.shape
    nIC = nIC or nPC-1
    if guess is None:
        guess = randn(nPC, nIC)

    if _orth_loaded:
        guess = orth(guess)

    B = zeros(guess.shape, np.float64)

    errvec = []
    icc = 0
    while icc < nIC:
        w = randn(nPC,1) - 0.5
        w -= dot(dot(B, transp(B)), w)
        w /= norm(w)

        wprev = zeros(w.shape)
        for i in range(int(maxiters) + 1):
            w -= dot(dot(B, transp(B)), w)
            w /= norm(w)
            #wprev = w.copy()
            if (norm(w-wprev) < termtol) or (norm(w + wprev) < termtol):
                B[:,icc]  = transp(w)
                icc += 1
                break
            wprev = w.copy()
    return B.real, errvec
Example #11
def random_walk(G, initial_prob, subspace_dim=3, walk_steps=3):
    assert type(initial_prob) == np.ndarray, "Initial probability distribution is not a numpy array"

    # Transform the adjacent matrix to a laplacian matrix P
    P = adj_to_laplacian(G)

    prob_matrix = np.zeros((G.shape[0], subspace_dim))
    prob_matrix[:, 0] = initial_prob
    for i in range(1, subspace_dim):
        prob_matrix[:, i] = np.dot(prob_matrix[:, i - 1], P)

    orth_prob_matrix = splin.orth(prob_matrix)

    for i in range(walk_steps):
        temp = np.dot(orth_prob_matrix.T, P)
        orth_prob_matrix = splin.orth(temp.T)
    return orth_prob_matrix
Example #12
def calc_subspace_proj_error(U, U_hat, ortho=False):
    """Calculate the normalized projection error between two orthogonal subspaces.
    Keyword arguments:
    U: ground truth subspace
    U_hat: estimated subspace
    """
    if not ortho:
        U = splinalg.orth(U)
        U_hat = splinalg.orth(U_hat)

    I = np.identity(U.shape[0])
    top = np.linalg.norm((I - U_hat @ U_hat.T) @ U, ord="fro")
    bottom = np.linalg.norm(U, ord="fro")

    error = float(top) / float(bottom)

    return error
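A quick property check: the error vanishes when U_hat spans the same subspace as U (any invertible mixing of its columns), and is positive for an unrelated subspace. A minimal sketch, assuming calc_subspace_proj_error is in scope:

import numpy as np

rng = np.random.default_rng(4)
U = rng.standard_normal((20, 3))
U_hat = U @ rng.standard_normal((3, 3))   # same column span, different basis
print(calc_subspace_proj_error(U, U_hat))                          # ~0.0
print(calc_subspace_proj_error(U, rng.standard_normal((20, 3))))   # > 0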
Example #13
def condex(n, k=4, theta=100):
    """
    CONDEX   `Counterexamples' to matrix condition number estimators.
         CONDEX(N, K, THETA) is a `counterexample' matrix to a condition
         estimator.  It has order N and scalar parameter THETA (default 100).
         If N is not equal to the `natural' size of the matrix then
         the matrix is padded out with an identity matrix to order N.
         The matrix, its natural size, and the estimator to which it applies
         are specified by K (default K = 4) as follows:
             K = 1:   4-by-4,     LINPACK (RCOND)
             K = 2:   3-by-3,     LINPACK (RCOND)
             K = 3:   arbitrary,  LINPACK (RCOND) (independent of THETA)
             K = 4:   N >= 4,     SONEST (Higham 1988)
         (Note that in practice the K = 4 matrix is not usually a
          counterexample because of the rounding errors in forming it.)

         References:
         A.K. Cline and R.K. Rew, A set of counter-examples to three
            condition number estimators, SIAM J. Sci. Stat. Comput.,
            4 (1983), pp. 602-611.
         N.J. Higham, FORTRAN codes for estimating the one-norm of a real or
            complex matrix, with applications to condition estimation
            (Algorithm 674), ACM Trans. Math. Soft., 14 (1988), pp. 381-396.
    """

    if k == 1:  # Cline and Rew (1983), Example B.

        a = np.array([[1, -1, -2 * theta, 0], [0, 1, theta, -theta], [0, 1, 1 + theta, -(theta + 1)], [0, 0, 0, theta]])

    elif k == 2:  # Cline and Rew (1983), Example C.

        a = np.array([[1, 1 - 2 / theta ** 2, -2], [0, 1 / theta, -1 / theta], [0, 0, 1]])

    elif k == 3:  # Cline and Rew (1983), Example D.

        a = rogues.triw(n, -1).T
        a[-1, -1] = -1

    elif k == 4:  # Higham (1988), p. 390.

        x = np.ones((n, 3))  # First col is e
        x[1:n, 1] = np.zeros(n - 1)  # Second col is e(1)

        # Third col is special vector b in SONEST
        x[:, 2] = ((-1) ** np.arange(n)) * (1 + np.arange(n) / (n - 1))

        # q*q' is now the orthogonal projector onto span(e(1), e, b)
        q = sl.orth(x)
        p = np.eye(n) - q @ q.T
        a = np.eye(n) + theta * p

    # Pad out with identity as necessary.
    m = a.shape[0]
    if m < n:
        b = np.eye(n)
        b[:m, :m] = a
        a = b

    return a
Example #14
def shadow_volume(bases):
  _high_dimension = len(bases.T)
  _low_dimension = len(bases)
  orth_bases = orth(bases.T)
  sum = 0.0
  for indices in itertools.combinations(range(_high_dimension), _low_dimension):
    sub_matrix = orth_bases[indices, :]
    sum += abs(det(sub_matrix))
  return sum
Example #15
 def __init__(self, *args, **kwargs):
     MultiModalFunction.__init__(self, *args, **kwargs)
     self._opts = (rand((self.numPeaks, self.xdim)) - 0.5) * 9.8
     self._opts[0] = (rand(self.xdim) - 0.5) * 8
     alphas = [power(self.maxCond, 2 * i / float(self.numPeaks - 2)) for i in range(self.numPeaks - 1)]
     shuffle(alphas)
     self._covs = [generateDiags(alpha, self.xdim, shuffled=True) / power(alpha, 0.25) for alpha in [self.optCond] + alphas]
     self._R = orth(rand(self.xdim, self.xdim))
     self._ws = [10] + [1.1 + 8 * i / float(self.numPeaks - 2) for i in range(self.numPeaks - 1)]
Example #16
def test_1():
    n_roots = 1000
    dim = 1000
    offset = 0
    A = DavTestMat(dim, offset)
    guess = LIN.orth(np.random.rand(dim, n_roots))
    evals, evecs = solvers.davidson(A, guess)
    evals_ref = np.linalg.eigvalsh(A.A)
    assert np.allclose(evals, evals_ref)
Example #17
def test_orth():
    m = 4
    k = 3
    G_temp = np.random.normal(0, 1, [m, k])

    G = orth(G_temp)

    print(G_temp.shape)
    print(G.shape)
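Beyond the shape check, the properties this test could assert are that orth returns orthonormal columns spanning the input; a short sketch of those assertions:

import numpy as np
from scipy.linalg import orth

G_temp = np.random.normal(0, 1, [4, 3])
G = orth(G_temp)
# orthonormal columns: G^T G is the identity
assert np.allclose(G.T @ G, np.eye(G.shape[1]))
# same column span: projecting the input onto span(G) reproduces it
assert np.allclose(G @ (G.T @ G_temp), G_temp)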
Example #18
def main():
    """Main function"""

    # Init signal
    sig = np.zeros(shape=(SIG_LEN, ))
    pos_not_zeros = np.random.randint(0, SIG_LEN, size=NUM_NOT_ZEROS)
    sig[pos_not_zeros] = np.sign(np.random.randn(NUM_NOT_ZEROS) - 0.5)

    sensing_matrix = np.random.randn(MEASURED_LEN, SIG_LEN)
    sensing_matrix = linalg.orth(sensing_matrix.T)  # pylint: disable=no-member
    sensing_matrix = sensing_matrix.T

    measured_sig = np.matmul(sensing_matrix, sig)

    sensing_matrix_pinv = linalg.pinv(sensing_matrix)
    x_min_norm = np.matmul(sensing_matrix_pinv, measured_sig)

    tau = 0.1 * np.max(np.matmul(sensing_matrix.T, measured_sig))

    initial_solution = x_min_norm + 0.01 * (np.random.randn(SIG_LEN))

    x_gpsr = gpsr.gpsr_bb(initial_solution,
                          sensing_matrix,
                          measured_sig,
                          tau=tau,
                          alpha0=5,
                          alpha_lims=(1e-30, 1e+30),
                          tolerance=0.0001)

    x_debiased = gpsr.debaising(x_gpsr,
                                sensing_matrix,
                                measured_sig,
                                tol=0.01,
                                fix_lev=0.1,
                                iter_max=12)

    print(tau)

    _, axes = plt.subplots(2, 2)
    axes[0, 0].plot(sig)
    axes[0, 0].set_title('True signal')

    axes[0, 1].plot(x_debiased, '-r')
    axes[0, 1].set_title('Reconstructed after debiasing')

    axes[1, 0].plot(x_gpsr, '-b')
    axes[1, 0].set_title('Reconstructed after GPSR')

    axes[1, 1].plot(sig - x_gpsr, '.b')
    axes[1, 1].plot(sig - x_debiased, '.r')
    axes[1, 1].set_title('Difference from true signal')

    print('MSE:',
          np.dot(sig - x_debiased, sig - x_debiased) / SIG_LEN,
          np.dot(sig - x_gpsr, sig - x_gpsr) / SIG_LEN)

    plt.show()
Example #19
    def generate(designers,
                 size,
                 inputs=None,
                 outputs=None,
                 is_coupled=True,
                 random=np.random):
        if inputs is None:
            # try to assign equally among designers
            inputs = [
                designers[int(i // (size / len(designers)))]
                for i in range(size)
            ]
        num_inputs = [
            np.sum(np.array(inputs) == designer).item()
            for designer in designers
        ]
        if outputs is None:
            # try to assign equally among designers
            outputs = [
                designers[int(i // (size / len(designers)))]
                for i in range(size)
            ]
        num_outputs = [
            np.sum(np.array(outputs) == designer).item()
            for designer in designers
        ]

        coupling = np.zeros((size, size))
        if is_coupled:
            # coupling matrix is orthonormal basis of random matrix
            coupling = orth(random.rand(size, size))
        else:
            # coupling matrix has random 1/-1 along diagonal
            coupling = np.diag(2 * random.randint(0, 2, size) - 1)

        # find a target with no solution values "close" to initial condition
        solution = np.zeros(size)
        while np.any(np.abs(solution) <= 0.20):
            target = orth(2 * random.rand(size, 1) - 1)
            # solve using dot product of coupling transpose and target
            solution = np.matmul(coupling.T, target)

        return Task(designers, num_inputs, num_outputs, coupling.tolist(),
                    target[:, 0].tolist(), inputs, outputs)
Example #20
    def fit(self, data):
        """
        Fit independent components using an iterative fixed-point algorithm

        Parameters
        ----------
        data: RDD of (tuple, array) pairs, or RowMatrix
            Data to estimate independent components from

        Returns
        ----------
        self : returns an instance of self.
        """

        if type(data) is not RowMatrix:
            data = RowMatrix(data)

        # reduce dimensionality
        svd = SVD(k=self.k, method=self.svdmethod).calc(data)

        # whiten data
        whtmat = real(dot(inv(diag(svd.s/sqrt(data.nrows))), svd.v))
        unwhtmat = real(dot(transpose(svd.v), diag(svd.s/sqrt(data.nrows))))
        wht = data.times(whtmat.T)

        # do multiple independent component extraction
        if self.seed != 0:
            random.seed(self.seed)
        b = orth(random.randn(self.k, self.c))
        b_old = zeros((self.k, self.c))
        iter = 0
        minabscos = 0
        errvec = zeros(self.maxiter)

        while (iter < self.maxiter) & ((1 - minabscos) > self.tol):
            iter += 1
            # update rule for pow3 non-linearity (TODO: add others)
            b = wht.rows().map(lambda x: outer(x, dot(x, b) ** 3)).sum() / wht.nrows - 3 * b
            # make orthogonal
            b = dot(b, real(sqrtm(inv(dot(transpose(b), b)))))
            # evaluate error
            minabscos = min(abs(diag(dot(transpose(b), b_old))))
            # store results
            b_old = b
            errvec[iter-1] = (1 - minabscos)

        # get un-mixing matrix
        w = dot(transpose(b), whtmat)

        # get components
        sigs = data.times(w.T).rdd

        self.w = w
        self.sigs = sigs

        return self
Example #21
def test_lobpcg(read_guess=True, tol=1e-5):
    X = orth(randn(N, m))
    if read_guess:
        X = x0

    try:
        λ, v = lobpcg(A, X, largest=False, M=P, maxiter=100, tol=tol)
        assert np.max(np.abs(λref - λ)) < tol
    except np.linalg.LinAlgError as e:
        print("ERROR: ", str(e))
Example #22
def get_RndSymPosMatrix(size=FEATURE_SIZE, divide=DIV):
    D = np.diag(np.random.random_sample(size)) / divide
    V = np.random.rand(size, size)
    U = orth(V)
    D = mat(D)
    U = mat(U)
    A = U.I * D * U
    return A
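Since U is orthogonal, U.I equals U.T, so A = U^{-1} D U is symmetric with the entries of D as its eigenvalues, all nonnegative. A quick property check, assuming the function and its module imports are in scope:

import numpy as np

A = np.asarray(get_RndSymPosMatrix(size=5, divide=2))
assert np.allclose(A, A.T)                    # symmetric
assert np.all(np.linalg.eigvalsh(A) >= 0)     # positive semidefinite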
Example #23
 def __init__(self, d, q, m=1):
     self.D = d
     self.Q = q
     self.M = m
     self.mean = [
         100 * orth(np.random.randn(self.D, self.Q)) for i in range(self.M)
     ]
     self.cov = [np.eye(self.Q) for i in range(self.M)]
     self.wtw = [np.eye(self.Q) for i in range(self.M)]
     self.calc_wtw()
Example #24
def init(X, init, ncomp):
    N, K = X[0].shape[0], len(X)
    if init == 'random':
        A = orth(np.random.rand(N, ncomp))
    elif init == 'nvecs':
        S = np.zeros((N, N))
        for k in range(K):
            S = S + X[k] + X[k].T
        _, A = eigsh(S, ncomp)
    return A
Example #25
def init(X, init, ncomp):
    N, K = X[0].shape[0], len(X)
    if init == 'random':
        A = orth(rand(N, ncomp))
    elif init == 'nvecs':
        S = zeros((N, N))
        for k in range(K):
            S = S + X[k] + X[k].T
        _, A = eigsh(S, ncomp)
    return A
Example #26
def minimize_max_squared_norm(known_bases):
    # random_bases = random.rand(low_dimension, high_dimension)
    res_bh = basinhopping(objective_function,
                          known_bases,
                          T=100,
                          niter=10,
                          disp=True)
    optimal_bases = res_bh.x.reshape((low_dimension, high_dimension))
    orth_optimal_bases = orth(optimal_bases.T).T
    return (res_bh.fun, orth_optimal_bases)
Example #27
def random_walk(G, initial_prob, subspace_dim=3, walk_steps=3):
    assert type(
        initial_prob
    ) == np.ndarray, "Initial probability distribution is not a numpy array"

    # Transform the adjacent matrix to a laplacian matrix P
    P = adj_to_laplacian(G)

    prob_matrix = np.zeros((G.shape[0], subspace_dim))
    prob_matrix[:, 0] = initial_prob
    for i in range(1, subspace_dim):
        prob_matrix[:, i] = np.dot(prob_matrix[:, i - 1], P)

    orth_prob_matrix = splin.orth(prob_matrix)

    for i in range(walk_steps):
        temp = np.dot(orth_prob_matrix.T, P)
        orth_prob_matrix = splin.orth(temp.T)
    return orth_prob_matrix
Example #28
def Cpp_lrrA(data):
    print "Cpp_LRR", "data:", data.shape
    Q = linalg.orth(np.transpose(data))
    A = np.dot(data, Q)
    lam = 0.1
    Z, E = lrrA(np.matrix(data), np.matrix(A), lam)
    Z = np.dot(Q, Z)
    Z = np.abs(Z) + np.abs(np.transpose(Z))
    print(Z)
    return [np.array(Z)]
Example #29
    def transform(self, X: NDArray):
        # ints as the sensitive index are ok, just have to handle them a little differently
        if isinstance(self.sens_idxs, int):
            orth_vecs = orth(X[:, self.sens_idxs].reshape(-1, 1))
        else:
            orth_vecs = orth(X[:, self.sens_idxs])

        P = np.zeros((orth_vecs.shape[0], orth_vecs.shape[0]))
        for i in range(orth_vecs.shape[1]):
            P += orth_vecs[:, i].reshape(-1, 1) @ orth_vecs[:, i].reshape(
                1, -1)

        R = (np.eye(P.shape[0]) - P) @ X

        if self.lmbda:
            for j in range(R.shape[1]):
                R[:, j] = R[:, j] + self.lmbda * (X[:, j] - R[:, j])

        return R
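After the projection step, every column of R is orthogonal to the sensitive column(s), before the optional lmbda blending adds part of X back in. A standalone sketch of the core computation for a single sensitive column (index 0 chosen for illustration):

import numpy as np
from scipy.linalg import orth

rng = np.random.default_rng(5)
X = rng.standard_normal((50, 4))
orth_vecs = orth(X[:, 0].reshape(-1, 1))   # basis for the sensitive direction
P = orth_vecs @ orth_vecs.T                # projector onto that direction
R = (np.eye(P.shape[0]) - P) @ X
# every column of R is orthogonal to the sensitive column
assert np.allclose(X[:, 0] @ R, 0)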
Example #30
 def get_A():
      D = np.diag(np.random.random_sample(size)) / divide
     V = np.random.rand(size, size)
     U = orth(V)
     D = mat(D)
     U = mat(U)
     A = U.I * D * U
     return A
Example #31
def orth(df, cols=None, index=None):
    cols = df.columns if cols is None else cols
    index = df.index if index is None else index
    a2 = df.values

    orthed_a = np.array(linalg.orth(a2, rcond=0), dtype=float)
    if orthed_a.shape == a2.shape:
        return pd.DataFrame(orthed_a, index=index, columns=cols)
    else:
        raise ValueError('exists highly related factors, please check data!')
Example #32
    def get_gram_T_Matrix(gram, type):
        """
            Function used to obtain the T matrix of a gramian matrix.
            Calculates the range space and null space of the given matrix and
                organizes it in [orth , null]
            Requires:
                Numpy Array (Gramian matrix)
            Returns:
                Numpy Array: T Matrix
        """
        if type == 'ctrb':
            orth_ = spla.orth(gram)
            null_ = spla.null_space(gram.transpose())
        elif type == 'obsv':
            orth_ = spla.orth(gram.transpose())
            null_ = spla.null_space(gram)

        T = np.hstack([orth_, null_])
        return T
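For a symmetric Gramian, orth(gram) spans the range and null_space(gram.T) its orthogonal complement, so T is a square orthogonal matrix. A quick sketch with a rank-deficient symmetric Gramian:

import numpy as np
import scipy.linalg as spla

gram = np.diag([2.0, 1.0, 0.0])   # symmetric, rank 2
T = np.hstack([spla.orth(gram), spla.null_space(gram.T)])
assert T.shape == (3, 3)
assert np.allclose(T.T @ T, np.eye(3))   # orthogonal for a symmetric gram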
Example #33
 def __init__(self, *args, **kwargs):
     MultiModalFunction.__init__(self, *args, **kwargs)
     print((self.numPeaks, self.xdim))
     self._opts = [(rand(self.xdim) - 0.5) * 8]
     self._opts.extend([(rand(self.xdim) - 0.5) * 9.8 for _ in range(self.numPeaks-1)])
     alphas = [power(self.maxCond, 2 * i / float(self.numPeaks - 2)) for i in range(self.numPeaks - 1)]
     shuffle(alphas)
     self._covs = [generateDiags(alpha, self.xdim, shuffled=True) / power(alpha, 0.25) for alpha in [self.optCond] + alphas]
     self._R = orth(rand(self.xdim, self.xdim))
     self._ws = [10] + [1.1 + 8 * i / float(self.numPeaks - 2) for i in range(self.numPeaks - 1)]
Example #34
 def rvs(self, n=1):
     """
     It ignores the dimension ``n``.
     """
     D = self.D
     d = self.d
     k = self.k
     W_sub = orth(randn(D - k, d - k))
     return np.vstack([np.hstack([W_sub, np.zeros((D - k, k))]),
                     np.hstack([np.zeros((k, d - k)), np.eye(k)])]).flatten()
Example #35
def ica(data, k, c, svdmethod="direct", maxiter=100, tol=0.000001, seed=0):
    """Perform independent components analysis

    :param data: RDD of data points
    :param k: number of principal components to use
    :param c: number of independent components to find
    :param maxiter: maximum number of iterations (default = 100)
    :param tol: tolerance for change in estimate (default = 0.000001)

    :return w: the mixing matrix
    :return sigs: the independent components

    TODO: also return unmixing matrix
    """
    # get count
    n = data.count()

    # reduce dimensionality
    scores, latent, comps = svd(data, k, meansubtract=0, method=svdmethod)

    # whiten data
    whtmat = real(dot(inv(diag(latent / sqrt(n))), comps))
    unwhtmat = real(dot(transpose(comps), diag(latent / sqrt(n))))
    wht = data.mapValues(lambda x: dot(whtmat, x))

    # do multiple independent component extraction
    if seed != 0:
        random.seed(seed)
    b = orth(random.randn(k, c))
    b_old = zeros((k, c))
    iter = 0
    minabscos = 0
    errvec = zeros(maxiter)

    while (iter < maxiter) & ((1 - minabscos) > tol):
        iter += 1
        # update rule for pow3 non-linearity (TODO: add others)
        b = wht.map(lambda kv: kv[1]).map(
            lambda x: outer(x,
                            dot(x, b)**3)).sum() / n - 3 * b
        # make orthogonal
        b = dot(b, real(sqrtm(inv(dot(transpose(b), b)))))
        # evaluate error
        minabscos = min(abs(diag(dot(transpose(b), b_old))))
        # store results
        b_old = b
        errvec[iter - 1] = (1 - minabscos)

    # get un-mixing matrix
    w = dot(transpose(b), whtmat)

    # get components
    sigs = data.mapValues(lambda x: dot(w, x))

    return w, sigs
Example #36
    def sample_k(self, k=5):

        if not hasattr(self,'A'):
            self.compute_kernel(kernel_type='cos-sim')

        eigen_vals, eigen_vec = eig(self.A)
        eigen_vals =np.real(eigen_vals)
        eigen_vec =np.real(eigen_vec)
        eigen_vec = eigen_vec.T
        N =self.A.shape[0]
        Z= list(range(N))

        if k==-1:
            probs = eigen_vals/(eigen_vals+1)
            jidx = np.array(np.random.rand(N)<=probs)    # set j in paper

        else:
            jidx = sample_k_eigenvecs(eigen_vals, k)

        V = eigen_vec[jidx]           # Set of vectors V in paper
        num_v = len(V)

        Y = []
        while num_v>0:
            Pr = np.sum(V**2, 0)/np.sum(V**2)
            y_i=np.argmax(np.array(np.random.rand() <= np.cumsum(Pr), np.int32))

            # pdb.set_trace()
            Y.append(y_i)
            # Z.remove(Z[y_i])
            V =V.T
            try:
                ri = np.argmax(np.abs(V[y_i]) > 0)
            except Exception:
                print("Error: Check: Matrix PSD/Sym")
                exit()
            V_r = V[:,ri]
            # nidx = list(range(ri)) + list(range(ri+1, len(V)))
            # V = V[nidx]

            if num_v>0:
                try:
                    V = la.orth(V - np.outer(V_r, (V[y_i, :] / V_r[y_i])))
                except Exception:
                    print("Error in Orthogonalization: Check: Matrix PSD/Sym")
                    pdb.set_trace()

            V= V.T

            num_v-=1

        Y.sort()
        out = np.array(Y)

        return out
Example #37
def profile_boundary_conditions(Ya,Yb,s,p):
	n=s['n']
	speed=Ya[3]
	UL = [1.0/p['beta'], 0, 0]; UR = [0, 0, 0]
	
	AM = Flinear( np.array(UL) ,speed,p)
	AP = Flinear( np.array(UR) ,speed,p)
	
	P,Q = EvansBin.projection1(AM,-1,0.0) # Decay manifold at - infty
	LM = linalg.orth(P.T)
	P,Q = EvansBin.projection1(AP,0,1e-8) # Growth manifold at + infty
	LP = linalg.orth(P.T)
	
	BCa = list(Ya[0:n]-Ya[n:2*n]) #matching conditions = 4
	BCa.append( Ya[0] - 0.5/p['beta'])      # phase condition = 1 
	
	BCb = list(LM.T.dot(Yb[n:2*n-1] - UL)) # at -infty; = 2
	BCb.append( (LP.T.dot(Yb[0:n-1]-UR))[0]) # at +infty; = 1
	
	return np.real(np.array(BCa,dtype = complex)), np.real(np.array(BCb,dtype = complex))
Example #38
def test_verbosity():
    """Check that nonzero verbosity level code runs.
    """
    A, B = ElasticRod(100)
    n = A.shape[0]
    m = 20
    np.random.seed(0)
    V = rand(n, m)
    X = orth(V)
    _, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False,
                  verbosityLevel=9)
Example #39
    def fit(self, X, Y, n_features, n_passes):

        #Randomized range finder for A^T.B
        Qx = np.random.rand(X.shape[1],n_features)
        Qy = np.random.rand(Y.shape[1],n_features)
        
        for i in range(n_passes):
            Ya = self.RandomMatrix(X, Y, Qy)
            Yb = self.RandomMatrix(Y, X, Qx)
            
            Qx = sci_lia.orth(Ya)
            Qy = sci_lia.orth(Yb)      
        
        finalMatrix,Lx,Ly = self.FinalMatrixCalculation(Qx, Qy, X, Y)
        
        U,D,V = np_lia.svd(finalMatrix)
        Xx = ((X.shape[0])**(0.5)) * (np.dot(Qx, np.dot(np_lia.inv(Lx), U)))
        Xy = ((Y.shape[0])**(0.5)) * (np.dot(Qy, np.dot(np_lia.inv(Ly), V)))
    
        return Xx, Xy, D
Example #40
 def fit(self, X, y=None):
     n_size, n_features = X.shape
     S = np.cov(X.T)
     eigvals, eigvectors = np.linalg.eig(S)
     eigvectors = linalg.orth(eigvectors)
     indices = np.argsort(eigvals)[-self.M:][::-1]
      # eigenvectors are stored as columns, so select columns, not rows
      self.n_compents = eigvectors[:, indices]
     self.n_compents = self.n_compents.reshape((n_features, -1))
     self.eigvals = eigvals[indices]
     total = np.sum(eigvals)
     self.compent_ratos = self.eigvals / total
Example #41
 def __init__(self, sizes, len_training_data):
     self.lmax = 0
     self.num_layers = len(sizes)
     self.num_training_data = len_training_data
     self.sizes = sizes
     self.biase = np.random.uniform(-0.2, 0.2, (1, self.sizes[1]))
     self.biase = orth(self.biase)
     self.biases = np.zeros((len_training_data, self.sizes[1]))
     self.W_in = np.random.uniform(-1.0, 1.0, (self.sizes[1], self.sizes[0]))
     if self.sizes[1] > self.sizes[0]:
         self.W_in = orth(self.W_in)
         # print (self.W_in.shape)
     else:
         self.W_in = orth(self.W_in.T).T
         # print (self.W_in.shape)
     # self.W_in = orth(self.W_in)
     self.W_out = np.zeros((self.sizes[2], self.sizes[1]))
     self.H = np.zeros((self.num_training_data, self.sizes[1]))
     for i in range(len_training_data):
         self.biases[i] = self.biase
Example #42
def randomize_drone(dist_max,vel_max,ang_max,sim_agent):
    start_location = np.random.random(size=[3])*dist_max
    start_velocity = np.random.random(size=[3])*vel_max
    start_angular = np.random.random(size=[3])*ang_max
    # Gram-Schmidt (via orth) to get orientation, assume full rank... hopefully that works
    start_orientation = orth(np.random.random(size=[3,3]))

    sim_agent.setOrientation(start_orientation[0],start_orientation[1])
    sim_agent.setAngular(start_angular)
    sim_agent.setPosition(start_location)
    sim_agent.setVelocity(start_velocity)
Example #43
def run(n, d):
    y = np.ones((n, 1))
    y[n // 2:] = -1
    X = np.random.random((n, d))
    idx_row, idx_col = np.where(y == 1)
    X[idx_row, 0] = 0.1 + X[idx_row, 0]
    idx_row, idx_col = np.where(y == -1)
    X[idx_row, 0] = -0.1 - X[idx_row, 0]
    U = la.orth(np.random.random((d, d)))
    X = np.dot(X, U)
    return (X, y)
Example #44
    def fit(self,
            *,
            timeout: float = None,
            iterations: int = None) -> base.CallResult[None]:
        assert self._X is not None, "No training data provided."
        assert self._X.ndim == 2, "Data is not in the right shape."
        assert self._dim_subspaces <= self._X.shape[
            1], "Dim_subspaces should be less than ambient dimension."

        _X = self._X.T
        n_features, n_samples = _X.shape

        # randomly initialize subspaces
        U_init = np.zeros((self._k, n_features, self._dim_subspaces))
        for kk in range(self._k):
            U_init[kk] = orth(
                self._random_state.randn(n_features, self._dim_subspaces))

        # compute residuals
        full_residuals = np.zeros((n_samples, self._k))
        for kk in range(self._k):
            tmp1 = np.dot(U_init[kk].T, _X)
            tmp2 = np.dot(U_init[kk], tmp1)
            full_residuals[:, kk] = np.linalg.norm(_X - tmp2, ord=2, axis=0)

        # label by nearest subspace
        estimated_labels = np.argmin(full_residuals, axis=1)

        # alternate between subspace estimation and assignment
        prev_labels = -1 * np.ones(estimated_labels.shape)
        it = 0
        while np.sum(estimated_labels != prev_labels) and (iterations is None
                                                           or it < iterations):
            # first update residuals after labels obtained
            U = np.empty((self._k, n_features, self._dim_subspaces))
            for kk in range(self._k):
                Z = _X[:, estimated_labels == kk]
                D, V = np.linalg.eig(np.dot(Z, Z.T))
                D_idx = np.argsort(-D)  # descending order
                U[kk] = V.real[:, D_idx[list(range(self._dim_subspaces))]]
                tmp1 = np.dot(U[kk, :].T, _X)
                tmp2 = np.dot(U[kk, :], tmp1)
                full_residuals[:, kk] = np.linalg.norm(_X - tmp2,
                                                       ord=2,
                                                       axis=0)
            # update prev_labels
            prev_labels = estimated_labels
            # label by nearest subspace
            estimated_labels = np.argmin(full_residuals, axis=1)

            it = it + 1

        self._U = U
        return base.CallResult(None)
Example #45
 def time_mikota(self, n, solver):
     m = 10
     if solver == 'lobpcg':
         X = rand(n, m)
         X = orth(X)
         LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
         M = LinearOperator(self.shape,
                 matvec=partial(_precond, LorU, lower),
                 matmat=partial(_precond, LorU, lower))
         eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
     else:
         w = eigh(self.A, self.B, eigvals_only=True, eigvals=(0, m-1))
Example #46
def svd3(data, k, meanSubtract=1):

    n = data.count()
    d = len(data.first())

    if meanSubtract == 1:
        data = data.map(lambda x: x - mean(x))

    def outerProd(x):
        return outer(x, x)

    def outerSum(iterator):
        yield sum(outer(x, x) for x in iterator)

    def outerSum2(iterator, other1, other2):
        yield sum(outer(x, dot(dot(x, other1), other2)) for x in iterator)

    C = random.rand(k, d)
    iterNum = 0
    iterMax = 10
    error = 100
    tol = 0.000001

    while (iterNum < iterMax) & (error > tol):
        Cold = C
        Cinv = dot(transpose(C), inv(dot(C, transpose(C))))
        preMult1 = data.context.broadcast(Cinv)
        # X = data.times(preMult1.value)
        # XX' = X.cov()
        XX = data.map(lambda x: outerProd(dot(x, preMult1.value))).reduce(lambda x, y: x + y)
        XXinv = inv(XX)
        preMult2 = data.context.broadcast(dot(Cinv, XXinv))
        # data1 = data.times(dot(Cinv, inv(XX'))
        # C = data.times(data1)
        C = data.map(lambda x: outer(x, dot(x, preMult2.value))).reduce(lambda x, y: x + y)
        C = transpose(C)

        error = sum(sum((C-Cold) ** 2))
        iterNum += 1

    C = transpose(orth(transpose(C)))
    # cov = data.times(transpose(C)).cov()
    cov = data.map(lambda x: dot(x, transpose(C))).mapPartitions(outerSum).reduce(
        lambda x, y: x + y) / n
    w, v = eig(cov)
    w = real(w)
    v = real(v)
    inds = argsort(w)[::-1]
    latent = w[inds[0:k]]
    comps = dot(transpose(v[:, inds[0:k]]), C)
    scores = data.map(lambda x: inner(x, comps))

    return comps, latent, scores
Example #47
 def __init__(self, basef, rotMat = None):
     """ by default the rotation matrix is random. """
     FunctionEnvironment.__init__(self, basef.xdim, basef.xopt)
      if rotMat is None:
         # make a random orthogonal rotation matrix
         self.M = orth(rand(basef.xdim, basef.xdim))
     else:
         self.M = rotMat
     if isinstance(basef, FunctionEnvironment):
         self.desiredValue = basef.desiredValue
     self.xopt = dot(inv(self.M), self.xopt)
     self.f = lambda x: basef.f(dot(x,self.M))
Example #48
def compare_solutions(A, B, m):
    """Check eig vs. lobpcg consistency.
    """
    n = A.shape[0]
    np.random.seed(0)
    V = rand(n, m)
    X = orth(V)
    eigvals, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False)
    eigvals.sort()
    w, _ = eig(A, b=B)
    w.sort()
    assert_almost_equal(w[:int(m / 2)], eigvals[:int(m / 2)], decimal=2)
Example #49
def ica(data, k, c, svdmethod="direct", maxiter=100, tol=0.000001, seed=0):
    """perform independent components analysis

    arguments:
    data - RDD of data points
    k - number of principal components to use
    c - number of independent components to find
    maxiter - maximum number of iterations (default = 100)
    tol - tolerance for change in estimate (default = 0.000001)

    returns:
    w - the mixing matrix
    sigs - the independent components
    """
    # get count
    n = data.count()

    # reduce dimensionality
    scores, latent, comps = svd(data, k, meansubtract=0, method=svdmethod)

    # whiten data
    whtmat = real(dot(inv(diag(latent/sqrt(n))), comps))
    unwhtmat = real(dot(transpose(comps), diag(latent/sqrt(n))))
    wht = data.map(lambda x: dot(whtmat, x))

    # do multiple independent component extraction
    if seed != 0:
        random.seed(seed)
    b = orth(random.randn(k, c))
    b_old = zeros((k, c))
    iter = 0
    minabscos = 0
    errvec = zeros(maxiter)

    while (iter < maxiter) & ((1 - minabscos) > tol):
        iter += 1
        # update rule for pow3 non-linearity (TODO: add others)
        b = wht.map(lambda x: outer(x, dot(x, b) ** 3)).sum() / n - 3 * b
        # make orthogonal
        b = dot(b, real(sqrtm(inv(dot(transpose(b), b)))))
        # evaluate error
        minabscos = min(abs(diag(dot(transpose(b), b_old))))
        # store results
        b_old = b
        errvec[iter-1] = (1 - minabscos)

    # get un-mixing matrix
    w = dot(transpose(b), whtmat)

    # get components
    sigs = data.map(lambda x: dot(w, x))

    return w, sigs
Example #50
def eigf_bcs(Ya,Yb,s,p):
	n=s['n'] # = 4 = Dimension of first order profile equation
	eig_n = 2*n # = 8 = Dimension of first order eigenfunction equation
	ph = s['ph']
	AM = A(s['L'], ((Ya[-2] + 1.0j*Ya[-1]) + (Yb[-2] + 1.0j*Yb[-1]))/2.0,s,p)
	AP = A(s['R'], ((Ya[-2] + 1.0j*Ya[-1]) + (Yb[-2] + 1.0j*Yb[-1]))/2.0,s,p)
	#
	P1,Q1 = projection2(AM,-1,1e-6); LM = linalg.orth(P1.T)
	P2,Q2 = projection2(AP,+1,1e-6); LP = linalg.orth(P2.T)
	#
	BCS = np.zeros(18)
	BCS[0:8] = (Ya[0:eig_n]-Ya[eig_n:2*eig_n]) # 8 matching conditions
	BCS[8] = Ya[ph[0,0]]- ph[0,1] 		# 2 phase conditions
	BCS[9] = Ya[ph[1,0]]- ph[1,1] 
	#
	# 8 projective conditions
	BCS[10:12] = np.real(LP.T.dot( Yb[0:n] + 1.0j*Yb[n:2*n] ))
	BCS[12:14] = np.imag(LP.T.dot( Yb[0:n] + 1.0j*Yb[n:2*n] ))
	BCS[14:16] = np.real(LM.T.dot( Yb[eig_n:eig_n+n] + 1.0j*Yb[eig_n+n:eig_n+2*n]	))
	BCS[16:18] = np.imag(LM.T.dot( Yb[eig_n:eig_n+n] + 1.0j*Yb[eig_n+n:eig_n+2*n]	))
	return BCS
Example #51
def test_verbosity():
    """Check that nonzero verbosity level code runs.
    """
    A, B = ElasticRod(100)

    n = A.shape[0]
    m = 20

    np.random.seed(0)
    V = rand(n,m)
    X = linalg.orth(V)

    eigs,vecs = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False,
                       verbosityLevel=11)
Example #52
def random_walk(G,initial_prob,subspace_dim=3,walk_steps=3):
    """
    Start a random walk with probability distribution p_initial. 
    Transition matrix needs to be calculated according to adjacent matrix G.
    
    """
    assert type(initial_prob) == np.ndarray, "Initial probability distribution is \
                                             not a numpy array"
       
    # Transform the adjacent matrix to a laplacian matrix P
    P = adj_to_Laplacian(G)
    
    Prob_Matrix = np.zeros((G.shape[0], subspace_dim))
    Prob_Matrix[:,0] = initial_prob
    for i in range(1,subspace_dim):
        Prob_Matrix[:,i] = np.dot(Prob_Matrix[:,i-1], P)
     
    Orth_Prob_Matrix = splin.orth(Prob_Matrix)
    
    for i in range(walk_steps):
        temp = np.dot(Orth_Prob_Matrix.T, P)
        Orth_Prob_Matrix = splin.orth(temp.T)
    
    return Orth_Prob_Matrix
Example #53
def compare_solutions(A,B,m):
    n = A.shape[0]

    np.random.seed(0)

    V = rand(n,m)
    X = linalg.orth(V)

    eigs,vecs = lobpcg(A, X, B=B, tol=1e-5, maxiter=30)
    eigs.sort()

    w,v = eig(A,b=B)
    w.sort()

    assert_almost_equal(w[:int(m/2)],eigs[:int(m/2)],decimal=2)
Example #54
 def __init__(self, basef, rotMat=None):
     """ by default the rotation matrix is random. """
     FunctionEnvironment.__init__(self, basef.xdim, basef.xopt)
      if rotMat is None:
         # make a random orthogonal rotation matrix
         self._M = orth(rand(basef.xdim, basef.xdim))
     else:
         self._M = rotMat
     self.desiredValue = basef.desiredValue            
     self.toBeMinimized = basef.toBeMinimized   
     self.xopt = dot(inv(self._M), self.xopt)
     def rf(x):
         if isinstance(x, ParameterContainer):
             x = x.params
         return basef.f(dot(x, self._M))    
     self.f = rf
Example #55
    def runtest(self):
        k = len(self.rdd.first()[1])
        c = 3
        n = 1000
        B = orth(random.randn(k, c))
        Bold = zeros((k, c))
        iterNum = 0
        errVec = zeros(20)
        while (iterNum < 5):
            iterNum += 1
            B = self.rdd.map(lambda kv: kv[1]).map(lambda x: outer(x, dot(x, B) ** 3)).reduce(lambda x, y: x + y) / n - 3 * B
            B = dot(B, real(sqrtm(inv(dot(transpose(B), B)))))
            minAbsCos = min(abs(diag(dot(transpose(B), Bold))))
            Bold = B
            errVec[iterNum-1] = (1 - minAbsCos)

        sigs = self.rdd.mapValues(lambda x: dot(B, x))
Example #56
def create_random_Sigmas(n, N, L, M, dist=gamma(2, scale=2)):
    """ Creates N random nxn covariance matrices s.t. the Lipschitz
        constants are uniformly bounded by Lbnd. Here dist is a 'frozen'
        scipy.stats probability distribution supported on R+"""
    # compute lower bound for eigenvalues
    lambdamin = ((2*np.pi)**n*np.e*L**2/M**2)**(-1.0/(n+1))
    Sigmas = []
    for i in range(N):
        # create random orthonormal matrix
        V = orth(np.random.uniform(size=(n,n)))
        # create n random eigenvalues from the distribution and shift them by lambdamin
        lambdas = lambdamin + dist.rvs(n)
        Sigma = np.zeros((n,n))
        for lbda, v in zip(lambdas, V):
            Sigma = Sigma + lbda*np.outer(v,v)
        Sigmas.append(Sigma)
    return np.array(Sigmas)
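Because V is orthogonal and each eigenvalue is shifted by lambdamin, every Sigma is symmetric positive definite with smallest eigenvalue at least lambdamin. A quick check, assuming the function and its imports (orth, gamma) are in scope:

import numpy as np

n, N, L, M = 3, 5, 10.0, 1.0
lambdamin = ((2 * np.pi) ** n * np.e * L ** 2 / M ** 2) ** (-1.0 / (n + 1))
for Sigma in create_random_Sigmas(n, N, L, M):
    assert np.allclose(Sigma, Sigma.T)
    assert np.linalg.eigvalsh(Sigma).min() >= lambdamin - 1e-12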
Example #57
def solve_lrr(X, A, lamb, reg=0, alm_type=0, display=False):
    Q = orth(A.T)
    B = A.dot(Q)

    if reg == 0:
        if alm_type == 0:
            Z, E = exact_alm_lrr_l21v2(X, B, lamb, display=display)
        else:
            Z, E = inexact_alm_lrr_l21(X, B, lamb, display=display)
    else:
        if alm_type == 0:
            Z, E = exact_alm_lrr_l1v2(X, B, lamb, display=display)
        else:
            Z, E = inexact_alm_lrr_l1(X, B, lamb, display)

    Z = Q.dot(Z)
    return (Z, E)
Example #58
def create_random_Q(domain, mu, L, M, pd=True, H=0, dist=uniform()):
    """ Creates random symmetric nxn matrix s.t. the Lipschitz constant of the resulting
        quadratic function is bounded by L. Here M is the uniform bound on the maximal loss.
        If pd is True, then the matrix is pos. definite and H is a lower bound on the
        eigenvalues of Q.  Finally, dist is a 'frozen' scipy.stats probability
        distribution supported on [0,1].  """
    n = domain.n
    # compute upper bound for eigenvalues
    Dmu = domain.compute_Dmu(mu)
    lambdamax = np.min((L/Dmu, 2*M/Dmu**2))
    # create random orthonormal matrix
    V = orth(np.random.uniform(size=(n,n)))
    # create n random eigenvalues from the distribution dist, scale them by lambdamax
    if pd:
        lambdas = H + (lambdamax-H)*dist.rvs(n)
    else:
        # randomly assign pos. and negative values to the evals
        lambdas = np.random.choice((-1,1), size=n)*lambdamax*dist.rvs(n)
    return np.dot(V, np.dot(np.diag(lambdas), V.T)), lambdamax
Example #59
def haar_matrix(N):
    if not np.log2(N).is_integer():
        raise ValueError("Invalid Haar Matrix Size: N must be a power of 2")
    else:
        H = np.ones((1,N))
        for k in range(1,N):
            p = max(0, np.floor(np.log2(k)))
            q = k - (2.0**p) + 1
            H_pos_k_upper =  np.array((q - 1.0)/(2.0**p) <= np.linspace(0,1,N,False))
            H_pos_k_lower =  np.array( np.linspace(0,1,N,endpoint=False) < (q - 0.5)/(2.0**p))
            H_pos_k =  np.logical_and(H_pos_k_upper, H_pos_k_lower)
            H_neg_k_upper =  np.array((q - 0.5)/(2.0**p) <= np.linspace(0,1,N,endpoint=False))
            H_neg_k_lower =  np.array(np.linspace(0,1,N,endpoint=False) < (q)/(2.0**p))
            H_neg_k = np.logical_and(H_neg_k_upper, H_neg_k_lower)
            H_k = np.array(H_pos_k, dtype=float) - np.array(H_neg_k, dtype=float)
            H = np.vstack((H,H_k))

    return linalg.orth(H)
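Since the Haar rows are mutually orthogonal, the final linalg.orth call essentially normalizes them, and the returned matrix has orthonormal columns. A quick check for N = 8:

import numpy as np

W = haar_matrix(8)
assert W.shape == (8, 8)
assert np.allclose(W.T @ W, np.eye(8))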