Example #1
    def __init__(self, A, space, integrator, measure):
        self.A = A


        self.DL = tril(A).tocsr()
        self.U = triu(A, k=1).tocsr()

        self.DU = triu(A).tocsr()
        self.L =  tril(A, k=-1).tocsr()

        linspace = LagrangeFiniteElementSpace(space.mesh, 1)

        # construct an AMG solver for the linear (P1) problem
        A1 = stiff_matrix(linspace, integrator, measure)
        isBdDof = linspace.boundary_dof()
        bdIdx = np.zeros((A1.shape[0], ), dtype=int)
        bdIdx[isBdDof] = 1
        Tbd = spdiags(bdIdx, 0, A1.shape[0], A1.shape[0])
        T = spdiags(1-bdIdx, 0, A1.shape[0], A1.shape[0])
        A1 = T@A1@T + Tbd
        self.ml = pyamg.ruge_stuben_solver(A1)  

        # Get interpolation matrix 
        NC = space.mesh.number_of_cells()
        bc = space.dof.multiIndex/space.p
        val = np.tile(bc, (NC, 1))
        c2d0 = space.cell_to_dof()
        c2d1 = linspace.cell_to_dof()

        I = np.einsum('ij, k->ijk', c2d0, np.ones(3))
        J = np.einsum('ik, j->ijk', c2d1, np.ones(len(bc)))
        gdof = space.number_of_global_dofs()
        lgdof = linspace.number_of_global_dofs()
        self.PI = csr_matrix((val.flat, (I.flat, J.flat)), shape=(gdof, lgdof))
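The four triangular splittings stored above (D+L, strict U, D+U, strict L) are the pieces a symmetric Gauss-Seidel sweep needs. A minimal self-contained sketch of such sweeps on a made-up diagonally dominant matrix (not part of the original class):

import numpy as np
from scipy.sparse import tril, triu, random, identity
from scipy.sparse.linalg import spsolve_triangular

A = (random(50, 50, 0.2, random_state=0) + 20 * identity(50)).tocsr()
b = np.ones(50)
x = np.zeros(50)

DL, U = tril(A).tocsr(), triu(A, k=1).tocsr()   # D + L and strict upper
DU, L = triu(A).tocsr(), tril(A, k=-1).tocsr()  # D + U and strict lower

for _ in range(20):
    x = spsolve_triangular(DL, b - U @ x, lower=True)   # forward sweep
    x = spsolve_triangular(DU, b - L @ x, lower=False)  # backward sweep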
Example #2
def split_matrix_by_relation(mat):
    '''
    split the sparse adjacency matrix by different relations
    '''
    length = mat.shape[0]
    a, b, c, d = sp.triu(mat, 1), sp.triu(mat, 2), sp.tril(mat, -1), sp.tril(mat, -2)
    rel_mats = [(a - b).tocoo(), (c - d).tocoo(), b, d]
    return [rm.data for rm in rel_mats], \
           [np.stack([rm.row, rm.col], axis=1) for rm in rel_mats], length
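By construction the four relation matrices hold, in order: the first super-diagonal, the first sub-diagonal, everything above the first super-diagonal, and everything below the first sub-diagonal. A small hypothetical usage:

import numpy as np
import scipy.sparse as sp

mat = sp.coo_matrix(np.arange(1, 17).reshape(4, 4))
data, coords, length = split_matrix_by_relation(mat)
# data[0]: first super-diagonal (col == row + 1)    -> [2, 7, 12]
# data[1]: first sub-diagonal (col == row - 1)      -> [5, 10, 15]
# data[2]: strictly above the first super-diagonal  -> [3, 4, 8]
# data[3]: strictly below the first sub-diagonal    -> [9, 13, 14]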
Example #3
def main():
    # create the matrix and the right hand sides
    N = 1000
    A = sp.coo_matrix(
        hilbert(N) + np.identity(N)
    )  # a well-conditioned, symmetric, positive-definite matrix with off-diagonal entries
    true_x1 = np.arange(N)
    true_x2 = np.array(list(reversed(np.arange(N))))
    b1 = A * true_x1
    b2 = A * true_x2

    # solve
    solver = MumpsCentralizedAssembledLinearSolver()
    x1, res = solver.solve(A, b1)
    x2, res = solver.solve(A, b2)
    assert np.allclose(x1, true_x1)
    assert np.allclose(x2, true_x2)

    # only perform factorization once
    solver = MumpsCentralizedAssembledLinearSolver()
    solver.do_symbolic_factorization(A)
    solver.do_numeric_factorization(A)
    x1, res = solver.do_back_solve(b1)
    x2, res = solver.do_back_solve(b2)
    assert np.allclose(x1, true_x1)
    assert np.allclose(x2, true_x2)

    # Tell Mumps the matrix is symmetric
    # Note that the answer will be incorrect if both the lower
    # and upper portions of the matrix are given.
    solver = MumpsCentralizedAssembledLinearSolver(sym=2)
    A_lower_triangular = sp.tril(A)
    x1, res = solver.solve(A_lower_triangular, b1)
    assert np.allclose(x1, true_x1)

    # Tell Mumps the matrix is symmetric and positive-definite
    solver = MumpsCentralizedAssembledLinearSolver(sym=1)
    A_lower_triangular = sp.tril(A)
    x1, res = solver.solve(A_lower_triangular, b1)
    assert np.allclose(x1, true_x1)

    # Set options
    solver = MumpsCentralizedAssembledLinearSolver(
        icntl_options={11: 2})  # compute error stats
    solver.set_cntl(2,
                    1e-4)  # set the stopping criteria for iterative refinement
    solver.set_icntl(
        10, 5
    )  # set the maximum number of iterations for iterative refinement to 5
    x1, res = solver.solve(A, b1)
    assert np.allclose(x1, true_x1)

    # Get information after the solve
    print('Number of iterations of iterative refinement performed: ',
          solver.get_infog(15))
    print('scaled residual: ', solver.get_rinfog(6))
Example #4
def plotDispatchCurve(outData, eta, t, isoName):
    chargeData = outData[0:t, ]
    dischargeData = outData[t:2 * t, ]
    A1 = hstack([eta * tril(np.ones([t, t])), -1 * tril(np.ones([t, t]))])
    soc = A1 * outData
    plt.plot(np.arange(0, t, 1), chargeData, 'r-', np.arange(0, t, 1),
             dischargeData, 'b-', np.arange(0, t, 1), soc, 'g-')
    plt.xlabel('Hour')
    plt.legend(('Charging', 'Discharging', 'Stored Energy'))
    plt.savefig(isoName + 'dispatchCurve.png')
    plt.close()
Example #5
def two_grid(A_, f_, al, be, N, max_iter, tol):
    n = int((N - 1) / 2)

    xc = np.linspace(0, 1, n + 2)[1:-1]
    xf = np.linspace(0, 1, 2 * n + 1 + 2)[1:-1]

    Ac = A_(n)
    Af = A_(2 * n + 1)

    def b_(f, n, al, be):
        b = f
        b[0] -= al * (n + 1)**2
        b[-1] -= be * (n + 1)**2

        return b

    bf = b_(f_(xf, 0.5), n, 0, 0)

    Icf = Icf_(n)
    Ifc = Ifc_(n)

    # Gauss-Seidel
    Mc = sparse.tril(Ac, 0, format='csc')
    Mf = sparse.tril(Af, 0, format='csc')

    # weighted Jacobi (this choice overrides the Gauss-Seidel M above)
    w = 3 / 2
    Mc = w * sparse.diags(Ac.diagonal(), format='csc')
    Mf = w * sparse.diags(Af.diagonal(), format='csc')

    res_norm = []
    uf = 0 * xf
    rf = bf - Af.dot(uf)
    for k in range(max_iter):

        res_norm.append(np.linalg.norm(rf) / np.linalg.norm(bf))
        if res_norm[-1] < tol:
            return uf, k

        # project to coarse grid
        rc = Ifc.dot(rf)

        # solve on coarse grid
        zc = sparse.linalg.spsolve(Ac, rc)

        # interpolate to fine grid and update fine solution
        uf += Icf.dot(zc)

        # smooth on fine grid
        rf = bf - Af.dot(uf)
        uf += sparse.linalg.spsolve_triangular(Mf, rf)

    return uf, -1
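The transfer operators Icf_ and Ifc_ are not shown in this example. For the 1D grids built above, a standard choice is linear interpolation and full-weighting restriction; the sketch below is one common convention, not necessarily the original helpers:

import scipy.sparse as sparse

def Icf_(n):
    # linear interpolation from n coarse to 2*n + 1 fine interior points;
    # coarse point j sits at fine index 2*j + 1, stencil [1/2, 1, 1/2]
    P = sparse.lil_matrix((2 * n + 1, n))
    for j in range(n):
        i = 2 * j + 1
        P[i - 1, j] = 0.5
        P[i, j] = 1.0
        P[i + 1, j] = 0.5
    return P.tocsr()

def Ifc_(n):
    # full-weighting restriction, the scaled transpose of interpolation
    return (0.5 * Icf_(n).T).tocsr()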
Example #6
 def testProblemStiffness2(self):
     width = 4
     height = 4
     [nodes, boundary_nodes, tris] = generateRectangularMesh((width, height), (0,0), (1,1))
     p = Problem(nodes, boundary_nodes, tris)
     K = p.getStiffnessMatrix(lambda x,y: x*y)
     self.assertTrue((sparse.triu(K, 1).T.toarray() == sparse.tril(K,-1).toarray()).all())
     width = 5
     height = 5
     [nodes, boundary_nodes, tris] = generateRectangularMesh((width, height), (0,0), (1,1))
     p = Problem(nodes, boundary_nodes, tris)
     K = p.getStiffnessMatrix(lambda x,y: x*y)
     self.assertTrue((sparse.triu(K, 1).T.toarray() == sparse.tril(K,-1).toarray()).all())
Example #7
    def test_tril_behavior(self):
        mat = get_base_matrix(use_tril=True)
        mat2 = tril(mat)
        self.assertTrue(np.all(mat.row == mat2.row))
        self.assertTrue(np.all(mat.col == mat2.col))
        self.assertTrue(np.allclose(mat.data, mat2.data))

        mat = get_base_matrix_wrong_order(use_tril=True)
        self.assertFalse(np.all(mat.row == mat2.row))
        self.assertFalse(np.allclose(mat.data, mat2.data))
        mat2 = tril(mat)
        self.assertTrue(np.all(mat.row == mat2.row))
        self.assertTrue(np.all(mat.col == mat2.col))
        self.assertTrue(np.allclose(mat.data, mat2.data))
Example #8
    def __init__(self,H,g,A,b,l,u,x=None,lam=None,mu=None,pi=None):
        """
        Quadratic program class.
        
        Parameters
        ----------
        H : symmetric matrix
        g : vector
        A : matrix
        b : vector
        l : vector
        u : vector
        x : vector
        """

        OptProblem.__init__(self)

        self.H = coo_matrix(H)
        self.Hphi = tril(self.H) # lower triangular
        self.g = g
        self.A = coo_matrix(A)
        self.b = b
        self.u = u
        self.l = l

        self.x = x
        
        self.lam = lam
        self.mu = mu
        self.pi = pi
Example #9
def texture_boundary(mesh, atex, val):
    """
    compute indexes that are the boundary of a region defined by value
    in a texture
    :param mesh:
    :param atex:
    :param val:
    :return:
    """
    # see mesh.facets_boundary()
    tex_val_indices = np.where(atex == val)[0]
    if not tex_val_indices.size:
        print('no value ' + str(val) + ' in the input texture!!')
        return list()
    else:

        bound_verts = texture_boundary_vertices(atex, val,
                                                mesh.vertex_neighbors)
        # select the edges that are on the boundary in the polygons
        adja = edges_to_adjacency_matrix(mesh)
        adja_tri = sparse.triu(adja) + sparse.tril(adja).transpose()
        r = sparse.extract.find(adja_tri)
        inr0 = []
        inr1 = []
        for v in bound_verts:
            inr0.extend(np.where(r[0] == v)[0])
            inr1.extend(np.where(r[1] == v)[0])
        r[2][inr0] = r[2][inr0] + 1
        r[2][inr1] = r[2][inr1] + 1
        li = r[0][np.where(r[2] == 4)]
        lj = r[1][np.where(r[2] == 4)]

        return edges_to_boundary(li, lj)
Example #10
def ssor(A, b, x0=None, w=1., maxiter=200, tol=1E-6):
    '''For symmetric matrices combine forward and backward SOR.'''
    assert is_symmetric(A, tol=1E-6)

    L, D, U = tril(A, k=-1), diags(A.diagonal(), 0), triu(A, k=1)
    # Forward
    MF = L + D/w
    NF = (1/w - 1)*D - U
    # Backward
    MB = U + D/w
    NB = (1/w - 1)*D - L

    # Start from 0 initial guess
    if x0 is None: x0 = np.zeros(A.shape[1])

    r = b - A.dot(x0)
    residuals = [np.linalg.norm(r)]

    count = 0
    while residuals[-1] > tol and count < maxiter:
        # Update
        x0 = spsolve(MF, NF.dot(x0) + b)
        x0 = spsolve(MB, NB.dot(x0) + b)
        # Error 
        r = b - A.dot(x0)
        residuals.append(np.linalg.norm(r))
        # Count
        count += 1
    
    converged = residuals[-1] < tol
    n_iters = len(residuals) - 1
    data = {'status': converged, 'iter count': n_iters, 'residuals': residuals}

    return x0, data
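A hypothetical driver for ssor, assuming the is_symmetric helper and the tril/triu/diags/spsolve imports the function relies on are in scope; the tridiagonal matrix is diagonally dominant, so the sweeps converge:

import numpy as np
from scipy.sparse import diags

n = 100
A = diags([-1., 2.5, -1.], [-1, 0, 1], shape=(n, n), format='csr')
b = np.ones(n)
x, info = ssor(A, b, w=1.2, maxiter=500, tol=1e-8)
print(info['status'], info['iter count'], np.linalg.norm(A @ x - b))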
Example #11
def sparse_power_iteration(P, x, tol=10e-16, maxiter=200):
    """Preconditioned power iteration for a sparse stochastic matrix

    Parameters
    ---------------
    P : array, shape (n, n), sparse
        transition matrix of a Markov Chain
    x : array, shape (n, )
        On entry, the initial guess. On exit, the final solution.

    """
    n = P.shape[0]
    # ILU factorization 
    LU = ilu0_factor(P)
    L = sparse.tril(LU)
    U = sparse.triu(LU)
    # New matrix Q
    Q = P.copy()
    Q.setdiag(1 - Q.diagonal())
    Q *= -1
    Q = Q.T
    info = -1
    t = -1
    relres = np.inf
    for t in range(maxiter):
        ## dot() is sparse matrix-vector multiplication
        dx = spla.spsolve(U, spla.spsolve(L, Q.dot(x)))
        x -= dx
        relres = tvnorm(dx)
        if relres < tol:
            info = 0
            break
    t += 1
    return (info, t, relres)
Example #12
def make_graph(scores, threshold=True, k_closest=False, k=3):
    print('==> Getting edges')
    no_pts = len(scores)
    if k_closest:
        k_ = k
    else:
        k_ = 1 # ensure there is at least one edge per node in the threshold graph
    edges = sparse.lil_matrix((no_pts, no_pts), dtype=np.uint8)
    scores.fill_diagonal_(0)  # get rid of self connection scores
    for patient in range(no_pts):
        k_highest = torch.sort(scores[patient].flatten()).indices[-k_:]
        for i in k_highest:
            edges[patient, i] = 1
    edges = edges + edges.transpose()  # make it symmetric again
    # keep only the lower triangle and then save
    edges = sparse.tril(edges, k=-1)
    if threshold:
        scores_lower = torch.tril(scores, diagonal=-1)
        del scores
        desired_no_edges = k * no_pts
        threshold_value = torch.sort(scores_lower.flatten()).values[-desired_no_edges]
        #for batch in batch(no_pts, n=10):
        for batch in torch.split(scores_lower, 100, dim=0):
            batch[batch < threshold_value] = 0
        edges = edges + sparse.lil_matrix(scores_lower.numpy())
        del scores_lower
    v, u, _ = sparse.find(edges)
    return u, v, k
Example #13
    def get_edge_list(self):
        r"""Return an edge list, an alternative representation of the graph.

        The weighted adjacency matrix is the canonical form used in this
        package to represent a graph as it is the easiest to work with when
        considering spectral methods.

        Returns
        -------
        v_in : vector of int
        v_out : vector of int
        weights : vector of float

        Examples
        --------
        >>> G = graphs.Logo()
        >>> v_in, v_out, weights = G.get_edge_list()
        >>> v_in.shape, v_out.shape, weights.shape
        ((3131,), (3131,), (3131,))

        """

        if self.is_directed():
            raise NotImplementedError('Directed graphs not supported yet.')

        else:
            v_in, v_out = sparse.tril(self.W).nonzero()
            weights = self.W[v_in, v_out]
            weights = np.asarray(weights).squeeze()

            # TODO G.ind_edges = sub2ind(size(G.W), G.v_in, G.v_out)

            assert self.Ne == v_in.size == v_out.size == weights.size
            return v_in, v_out, weights
Example #14
def eval_h(x, lagrange, obj_factor, flag, user_data=None):
    """Calculates the Hessian matrix (optional).

    If omitted, set nnzh to 0 and Ipopt will use an approximated Hessian,
    which will make the convergence slower.
    """
    Hs = user_data['Hs']
    if flag:
        return (Hs.row, Hs.col)
    else:
        neqnln = user_data['neqnln']
        niqnln = user_data['niqnln']
        om = user_data['om']
        Ybus = user_data['Ybus']
        Yf = user_data['Yf']
        Yt = user_data['Yt']
        ppopt = user_data['ppopt']
        il = user_data['il']

        lam = {}
        lam['eqnonlin'] = lagrange[:neqnln]
        lam['ineqnonlin'] = lagrange[arange(niqnln) + neqnln]

        H = opf_hessfcn(x, lam, om, Ybus, Yf, Yt, ppopt, il, obj_factor)

        Hl = tril(H, format='csc')

        ## FIXME: Extend PyIPOPT to handle changes in sparsity structure
        nnzh = Hs.nnz
        Hd = zeros(nnzh)
        for i in range(nnzh):
            Hd[i] = Hl[Hs.row[i], Hs.col[i]]

        return Hd
Example #15
def _neighbour_list(graph, separator, secondary, attributes, **kwargs):
    '''
    Generate a string containing the neighbour list of the graph, together
    with the edge attributes requested in `attributes`.
    @todo: speed this up!
    '''
    lst_neighbours = None

    if graph.is_directed():
        lst_neighbours = list(graph.adjacency_matrix(mformat="lil").rows)
    else:
        import scipy.sparse as ssp
        lst_neighbours = list(
            ssp.tril(graph.adjacency_matrix(), format="lil").rows)

    for v1 in range(graph.node_nb()):
        for i, v2 in enumerate(lst_neighbours[v1]):
            str_edge = str(v2)

            eattr = graph.get_edge_attributes((v1, v2))

            for attr in attributes:
                str_edge += "{}{}".format(secondary, eattr[attr])

            lst_neighbours[v1][i] = str_edge

        lst_neighbours[v1] = "{}{}{}".format(
            v1, separator, separator.join(lst_neighbours[v1]))

    str_neighbours = "\n".join(lst_neighbours)

    return str_neighbours
Example #16
def Jacobi(A, B, x, e):
    '''Jacobi iteration method'''
    T0 = time.time()
    D = sparse.diags(A.diagonal())
    L = -sparse.tril(A, -1)
    U = -sparse.triu(A, 1)
    print('Solving B_J')
    Dinv = sparse.linalg.inv(D)
    B_J = Dinv * (L + U)
    Dinv_B = Dinv * B  # hoisted: recomputing inv(D) every sweep is wasteful
    print('Solved B_J')
    x_next = x.copy()
    times = 0
    err_record = np.array([])
    while True:
        x_next = B_J * x + Dinv_B
        err = max(abs(x_next - x))
        err_record = np.append(err_record, err)

        times += 1
        print('err = ', err)
        x = x_next.copy()
        if err < e:
            break
        elif times > 1000:
            print('Times out of range')
            break
    T1 = time.time()
    print('Iteration Times = ', times)
    print('err = ', err)
    print('CPU time = ', T1 - T0)
    #print(A * x)
    #print('X = ', x)
    return x, err_record
Example #17
def SOR(A, b, w, tol, max_iter=20, opt=False):
    n = np.shape(A)[1]
    x = np.zeros(n)  # dense iterate; a sparse (n, 1) matrix breaks the residual shapes

    D = sparse.diags(A.diagonal(), format='csc')
    L = sparse.tril(A, -1, format='csc')
    M = D / w + L

    if opt:
        G = sparse.eye(n) - sparse.diags(1 / A.diagonal(), format='csc').dot(A)
        p = np.linalg.norm(sparse.linalg.eigs(G, 1, which='LM')[0])
        w = 2 / (1 + np.sqrt(1 - p**2))
        print(w)
    residual_norm = np.zeros(max_iter)

    iter_tol = -1
    for iter in range(max_iter):
        residual = b - A.dot(x)
        x += sparse.linalg.spsolve_triangular(M, residual)
        residual_norm[iter] = np.linalg.norm(residual) / np.linalg.norm(b)
        if residual_norm[iter] < tol:
            iter_tol = iter
            break

    return (x, residual_norm, iter_tol)
Example #18
def sor(A, b, x0=None, w=1., maxiter=200, tol=1E-6, direction='forward'):
    '''
    SOR iteration has M = L + D/w, N = (1/w-1)*D - U for forward
    and M = U + D/w, N = (1/w-1)*D - L for backward.
    '''
    L, D, U = tril(A, k=-1), diags(A.diagonal(), 0), triu(A, k=1)
    if direction == 'forward':
        M = L + D/w
        N = (1/w - 1)*D - U
    else:
        M = U + D/w
        N = (1/w - 1)*D - L

    # Start from 0 initial guess
    if x0 is None: x0 = np.zeros(A.shape[1])

    r = b - A.dot(x0)
    residuals = [np.linalg.norm(r)]

    count = 0
    while residuals[-1] > tol and count < maxiter:
        # Update
        x0 = spsolve(M, N.dot(x0) + b)
        # Error 
        r = b - A.dot(x0)
        residuals.append(np.linalg.norm(r))
        # Count
        count += 1
    
    converged = residuals[-1] < tol
    n_iters = len(residuals) - 1
    data = {'status': converged, 'iter count': n_iters, 'residuals': residuals}

    return x0, data
Example #19
def deg_vec(W, deg_type='out', sparse_flag=True):
    r"""
    Create the degree vector

    Parameters
    ----------
    W : array
        Adjacency matrix
    deg_type : string
        Degree type to use in case the graph is directed.
    sparse_flag : bool
        Use sparse matrices (True) or not (False).

    """
    if is_directed(W):
        if deg_type == 'in':
            d = sparse.tril(W, k=0, format='csr').sum(
                1) if sparse_flag else np.sum(np.tril(W), axis=1)
        elif deg_type == 'out':
            d = sparse.triu(W, k=0, format='csr').sum(
                1) if sparse_flag else np.sum(np.triu(W), axis=1)
        elif deg_type == 'average':
            d = 0.5 * (np.sum(W, axis=0) + np.sum(W, axis=1))
    else:
        d = np.sum(W, axis=1)

    return np.ravel(d)
Example #20
    def do_symbolic_factorization(self, matrix, raise_on_error=True):
        if not isspmatrix_coo(matrix):
            matrix = matrix.tocoo()
        matrix = tril(matrix)
        nrows, ncols = matrix.shape
        self._dim = nrows

        try:
            self._mumps.do_symbolic_factorization(matrix)
            self._prev_allocation = max(self.get_infog(16), self.get_icntl(23))
            # INFOG(16) is the Mumps estimate for memory usage; ICNTL(23)
            # is the override used in increase_memory_allocation. Both are
            # already rounded to MB, so neither should ever be negative.
        except RuntimeError as err:
            if raise_on_error:
                raise err

        stat = self.get_infog(1)
        res = LinearSolverResults()
        if stat == 0:
            res.status = LinearSolverStatus.successful
        elif stat in {-6, -10}:
            res.status = LinearSolverStatus.singular
        elif stat < 0:
            res.status = LinearSolverStatus.error
        else:
            res.status = LinearSolverStatus.warning
        return res
Example #21
    def do_numeric_factorization(self,
                                 matrix,
                                 raise_on_error=True,
                                 timer=None):
        if not isspmatrix_coo(matrix):
            matrix = matrix.tocoo()
        matrix = tril(matrix)

        if (not np.array_equal(matrix.row, self._row)) or (not np.array_equal(
                matrix.col, self._col)):
            self.do_symbolic_factorization(matrix=matrix,
                                           raise_on_error=raise_on_error,
                                           timer=timer)

        try:
            self._mumps.do_numeric_factorization(matrix)
        except RuntimeError as err:
            if raise_on_error:
                raise err

        stat = self.get_infog(1)
        res = LinearSolverResults()
        if stat == 0:
            res.status = LinearSolverStatus.successful
        elif stat in {-6, -10}:
            res.status = LinearSolverStatus.singular
        elif stat in {-8, -9}:
            res.status = LinearSolverStatus.not_enough_memory
        elif stat < 0:
            res.status = LinearSolverStatus.error
        else:
            res.status = LinearSolverStatus.warning
        return res
Example #22
    def do_symbolic_factorization(self, matrix, raise_on_error=True):
        if not isspmatrix_coo(matrix):
            matrix = matrix.tocoo()
        matrix = tril(matrix)
        nrows, ncols = matrix.shape
        self._dim = nrows

        try:
            self._mumps.do_symbolic_factorization(matrix)
            self._prev_allocation = self.get_infog(16)
        except RuntimeError as err:
            if raise_on_error:
                raise err

        stat = self.get_infog(1)
        res = LinearSolverResults()
        if stat == 0:
            res.status = LinearSolverStatus.successful
        elif stat in {-6, -10}:
            res.status = LinearSolverStatus.singular
        elif stat < 0:
            res.status = LinearSolverStatus.error
        else:
            res.status = LinearSolverStatus.warning
        return res
Example #23
def eval_h(x, lagrange, obj_factor, flag, user_data=None):
    """Calculates the Hessian matrix (optional).

    If omitted, set nnzh to 0 and Ipopt will use an approximated Hessian,
    which will make the convergence slower.
    """
    Hs = user_data['Hs']
    if flag:
        return (Hs.row, Hs.col)
    else:
        neqnln = user_data['neqnln']
        niqnln = user_data['niqnln']
        om     = user_data['om']
        Ybus   = user_data['Ybus']
        Yf     = user_data['Yf']
        Yt     = user_data['Yt']
        ppopt  = user_data['ppopt']
        il     = user_data['il']

        lam = {}
        lam['eqnonlin']   = lagrange[:neqnln]
        lam['ineqnonlin'] = lagrange[arange(niqnln) + neqnln]

        H = opf_hessfcn(x, lam, om, Ybus, Yf, Yt, ppopt, il, obj_factor)

        Hl = tril(H, format='csc')

        ## FIXME: Extend PyIPOPT to handle changes in sparsity structure
        nnzh = Hs.nnz
        Hd = zeros(nnzh)
        for i in range(nnzh):
            Hd[i] = Hl[Hs.row[i], Hs.col[i]]

        return Hd
Example #24
    def do_numeric_factorization(self, matrix, raise_on_error=True):
        if not isspmatrix_coo(matrix):
            matrix = matrix.tocoo()
        matrix = tril(matrix)
        nrows, ncols = matrix.shape
        if nrows != ncols:
            raise ValueError('Matrix must be square')
        if nrows != self._dim:
            raise ValueError(
                'Matrix dimensions do not match the dimensions of '
                'the matrix used for symbolic factorization')

        stat = self._ma27.do_numeric_factorization(irn=matrix.row,
                                                   icn=matrix.col,
                                                   dim=self._dim,
                                                   entries=matrix.data)
        res = LinearSolverResults()
        if stat == 0:
            res.status = LinearSolverStatus.successful
        else:
            if raise_on_error:
                raise RuntimeError(
                    'Numeric factorization was not successful; return code: ' +
                    str(stat))
            if stat in {-3, -4}:
                res.status = LinearSolverStatus.not_enough_memory
            elif stat in {-5, 3}:
                res.status = LinearSolverStatus.singular
            else:
                res.status = LinearSolverStatus.error

        self._num_status = res.status

        return res
Example #25
def diag_trim(mat, n):
    """
    Trim an upper triangle sparse matrix so that only the first n diagonals
    are kept.

    Parameters
    ----------

    mat : scipy.sparse.csr_matrix or numpy.array
        The sparse matrix to be trimmed
    n : int
        The number of diagonals from the center to keep (0-based).

    Returns
    -------
    scipy.sparse.dia_matrix or numpy.array:
        The diagonally trimmed upper triangle matrix with only the first
        n diagonals kept.
    """
    if not sp.issparse(mat):
        trimmed = mat.copy()
        n_diags = trimmed.shape[0]
        for diag in range(n, n_diags):
            set_mat_diag(trimmed, diag, 0)
        return trimmed

    if mat.format != "csr":
        raise ValueError("input type must be scipy.sparse.csr_matrix")
    # Trim diagonals by removing all elements further than n in the
    # upper triangle
    trimmed = sp.tril(mat, n, format="csr")
    trimmed = sp.triu(trimmed, format="csr")

    return trimmed
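A quick sanity check on a dense-then-sparse upper-triangular example; only diagonals 0 through n survive:

import numpy as np
import scipy.sparse as sp

mat = sp.csr_matrix(np.triu(np.ones((5, 5))))
print(diag_trim(mat, 2).toarray())
# entries with col - row > 2 are dropped; diagonals 0, 1 and 2 remain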
Example #26
def prune_adj(oriadj, non_zero_idx: int, percent: int):
    original_prune_num = int(
        ((non_zero_idx - oriadj.size()[0]) / 2) * (percent / 100))
    adj = SparseTensor.from_torch_sparse_coo_tensor(oriadj).to_scipy()

    # find the lower half of the matrix
    low_adj = tril(adj, -1)
    non_zero_low_adj = low_adj.data[low_adj.data != 0]

    low_pcen = np.percentile(abs(non_zero_low_adj), percent)
    under_threshold = abs(low_adj.data) < low_pcen
    before = len(non_zero_low_adj)
    low_adj.data[under_threshold] = 0
    non_zero_low_adj = low_adj.data[low_adj.data != 0]
    after = len(non_zero_low_adj)

    rest_pruned = original_prune_num - (before - after)
    if rest_pruned > 0:
        mask_low_adj = (low_adj.data != 0)
        low_adj.data[low_adj.data == 0] = 2000000
        flat_indices = np.argpartition(low_adj.data,
                                       rest_pruned - 1)[:rest_pruned]
        low_adj.data = np.multiply(low_adj.data, mask_low_adj)
        low_adj.data[flat_indices] = 0
    low_adj.eliminate_zeros()
    new_adj = low_adj + low_adj.transpose()
    new_adj = new_adj + sparse.eye(new_adj.shape[0])
    return SparseTensor.from_scipy(new_adj).to_torch_sparse_coo_tensor().to(
        device)
Example #27
    def do_symbolic_factorization(self, matrix, raise_on_error=True):
        self._num_status = None
        if not isspmatrix_coo(matrix):
            matrix = matrix.tocoo()
        matrix = tril(matrix)
        nrows, ncols = matrix.shape
        if nrows != ncols:
            raise ValueError('Matrix must be square')
        self._dim = nrows

        stat = self._ma27.do_symbolic_factorization(dim=self._dim,
                                                    irn=matrix.row,
                                                    icn=matrix.col)
        res = LinearSolverResults()
        if stat == 0:
            res.status = LinearSolverStatus.successful
        else:
            if raise_on_error:
                raise RuntimeError(
                    'Symbolic factorization was not successful; return code: '
                    + str(stat))
            if stat in {-3, -4}:
                res.status = LinearSolverStatus.not_enough_memory
            elif stat in {-5, 3}:
                res.status = LinearSolverStatus.singular
            else:
                res.status = LinearSolverStatus.error
        return res
Example #28
 def setUp(self):
     n = 50
     nrhs = 20
     self.A = sp.rand(n, n, 0.4) + sp.identity(n)
     self.sol = np.ones((n, nrhs))
     self.rhsU = sp.triu(self.A) * self.sol
     self.rhsL = sp.tril(self.A) * self.sol
Example #29
 def test_directLower_python(self):
     from pymatsolver import _ForwardSolver
     ALinv = _ForwardSolver(sp.tril(self.A))
     X = ALinv * self.rhsL
     x = ALinv * self.rhsL[:,0]
     self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
     self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
Example #30
def cooler2csr(cooleruri):
    '''
    loads a cooler into a csr matrix
    taken from HiCMatrix cool.py see also
    https://github.com/deeptools/HiCMatrix/blob/master/hicmatrix/lib/cool.py

    :param cooleruri:   uri to a given cooler

    :return:            data in cooler as scipy.sparse.csr_matrix
    '''
    cooler_file = cooler.Cooler(cooleruri)
    matrixDataFrame = cooler_file.matrix(balance=False,
                                         sparse=True,
                                         as_pixels=True)
    used_dtype = np.int32
    if np.iinfo(np.int32).max < cooler_file.info['nbins']:
        used_dtype = np.int64
    count_dtype = matrixDataFrame[0]['count'].dtype
    data = np.empty(cooler_file.info['nnz'], dtype=count_dtype)
    instances = np.empty(cooler_file.info['nnz'], dtype=used_dtype)
    features = np.empty(cooler_file.info['nnz'], dtype=used_dtype)
    i = 0
    size = cooler_file.info['nbins'] // 32
    if size == 0:
        size = 1
    start_pos = 0
    while i < cooler_file.info['nbins']:
        matrixDataFrameChunk = matrixDataFrame[i:i + size]
        _data = matrixDataFrameChunk['count'].values.astype(count_dtype)
        _instances = matrixDataFrameChunk['bin1_id'].values.astype(used_dtype)
        _features = matrixDataFrameChunk['bin2_id'].values.astype(used_dtype)

        data[start_pos:start_pos + len(_data)] = _data
        instances[start_pos:start_pos + len(_instances)] = _instances
        features[start_pos:start_pos + len(_features)] = _features
        start_pos += len(_features)
        i += size
        del _data
        del _instances
        del _features

    matrix = csr_matrix((data, (instances, features)),
                        shape=(int(cooler_file.info['nbins']),
                               int(cooler_file.info['nbins'])),
                        dtype=count_dtype)

    del data
    del instances
    del features
    gc.collect()

    # filling lower triangle in case only upper triangle was saved
    if tril(matrix, k=-1).sum() == 0:
        # this case means that the lower triangle of the
        # symmetric matrix (below the main diagonal)
        # is zero. In this case, replace the lower
        # triangle using the upper triangle
        matrix = matrix + triu(matrix, 1).T

    return matrix
Example #31
def adjacency_split_naive(A, p_val, neg_mul=1, max_num_val=None):
    edges = np.column_stack(sp.tril(A).nonzero())
    num_edges = edges.shape[0]
    num_val_edges = int(num_edges * p_val)
    if max_num_val is not None:
        num_val_edges = min(num_val_edges, max_num_val)

    shuffled = np.random.permutation(num_edges)
    which_val = shuffled[:num_val_edges]
    which_train = shuffled[num_val_edges:]
    train_ones = edges[which_train]
    val_ones = edges[which_val]
    A_train = sp.coo_matrix((np.ones_like(train_ones.T[0]), (train_ones.T[0], train_ones.T[1])),
                            shape=A.shape).tocsr()
    A_train = A_train.maximum(A_train.T)

    num_nodes = A.shape[0]
    num_val_nonedges = neg_mul * num_val_edges
    candidate_zeros = np.random.choice(np.arange(num_nodes, dtype=np.int32),
                                       size=(2 * num_val_nonedges, 2), replace=True)
    cne1, cne2 = candidate_zeros[:, 0], candidate_zeros[:, 1]
    to_keep = (1 - A[cne1, cne2]).astype(bool).A1
    val_zeros = candidate_zeros[to_keep][:num_val_nonedges]
    if to_keep.sum() < num_val_nonedges:
        raise ValueError("Couldn't produce enough non-edges")

    return A_train, val_ones, val_zeros
Example #32
def project_dot_onemode(superclass, biadjmatrix, onemode):
    """ Get the weighted adjacency matrix of the onemode graph by matrix multiplication """
    t_start = time.time()
    if onemode == "t":
        wmatrix = biadjmatrix @ biadjmatrix.T
    elif onemode == "b":
        wmatrix = biadjmatrix.T @ biadjmatrix
    print(
        f"[Time] onemode-dotproduct {onemode} {time.time() - t_start:.3f} sec")
    print(f"[Info] wmatrix {onemode} type {type(wmatrix)}")
    print(f"[Info] wmatrix {onemode} dtype {wmatrix.dtype}")
    print(
        f"[Info] wmatrix {onemode} nbytes in GB {(wmatrix.data.nbytes + wmatrix.indptr.nbytes + wmatrix.indices.nbytes) / (1000 ** 3):.6f}"
    )
    print(f"[Info] wmatrix {onemode} shape {wmatrix.shape}")
    print(f"[Info] wmatrix {onemode} maxelement {wmatrix.max()}")
    count_nonzeroes = wmatrix.nnz
    max_nonzeroes = wmatrix.shape[0] * (wmatrix.shape[0] - 1)
    matrix_density = count_nonzeroes / max_nonzeroes
    print(f"[Info] wmatrix {onemode} density {matrix_density:.4f}")
    t_start = time.time()
    wmatrix = sparse.tril(wmatrix, k=-1)
    print(f"[Time] wmatrix {onemode} tril {time.time() - t_start:.3f} sec")
    wmatrix = wmatrix.tocsr()
    print(f"[Info] wmatrix {onemode} tril type {type(wmatrix)}")
    print(
        f"[Info] wmatrix {onemode} tril nbytes data in GB {(wmatrix.data.nbytes) / (1000 ** 3):.6f}"
    )
    t_start = time.time()
    sparse.save_npz(f"out/{superclass}.{onemode}.npz", wmatrix)
    print(f"[Time] save-npz {onemode} {time.time() - t_start:.3f} sec")
Example #33
def test_topological_nodes(n=100):
    g = dgl.DGLGraph()
    a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n))
    b = sp.tril(a, -1).tocoo()
    g.from_scipy_sparse_matrix(b)

    layers_dgl = dgl.topological_nodes_generator(g)

    adjmat = g.adjacency_matrix()
    def tensor_topo_traverse():
        n = g.number_of_nodes()
        mask = F.copy_to(F.ones((n, 1)), F.cpu())
        degree = F.spmm(adjmat, mask)
        while F.reduce_sum(mask) != 0.:
            v = F.astype((degree == 0.), F.float32)
            v = v * mask
            mask = mask - v
            frontier = F.copy_to(F.nonzero_1d(F.squeeze(v, 1)), F.cpu())
            yield frontier
            degree -= F.spmm(adjmat, v)

    layers_spmv = list(tensor_topo_traverse())

    assert len(layers_dgl) == len(layers_spmv)
    assert all(toset(x) == toset(y) for x, y in zip(layers_dgl, layers_spmv))
Example #34
def support_dropout(adj, p, drop_edge=False):
    assert 0.0 < p < 1.0
    lower_adj = sp.tril(adj)
    n_nodes = lower_adj.shape[0]

    # find nodes to isolate
    isolate = np.random.choice(range(n_nodes),
                               size=int(n_nodes * p),
                               replace=False)
    s_idx, e_idx = lower_adj.nonzero()

    # mask the nodes that have been selected
    # here mask contains all the nodes that have been selected in isolated
    # regardless whether it is source node or end node of an edge
    mask = np.in1d(s_idx, isolate)
    mask += np.in1d(e_idx, isolate)
    # csr_matrix.data is the array storing the non-zero elements, it is usually much
    # fewer than csr_matrix.shape[0] * csr_matrix.shape[1]
    assert mask.shape[0] == lower_adj.data.shape[0]

    lower_adj.data[mask] = 0
    lower_adj.eliminate_zeros()

    if drop_edge:
        prob = np.random.uniform(0, 1, size=lower_adj.data.shape)
        remove = prob < p
        lower_adj.data[remove] = 0
        lower_adj.eliminate_zeros()

    lower_adj = lower_adj + lower_adj.transpose()
    return lower_adj
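A small hypothetical driver: build a symmetric 0/1 adjacency matrix, then drop the edges of roughly 30% of the nodes (plus, with drop_edge=True, a random share of the remaining edges):

import numpy as np
import scipy.sparse as sp

A = sp.random(20, 20, density=0.2, random_state=0, format='csr')
A.data[:] = 1.0
A = sp.triu(A, 1)
A = (A + A.T).tocsr()  # symmetric 0/1 adjacency with an empty diagonal

A_dropped = support_dropout(A, p=0.3, drop_edge=True)
print(A.nnz, A_dropped.nnz)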
Example #35
    def fast_solve(self):

        self.precondieitoner()

        tgdof = self.tensorspace.number_of_global_dofs()
        vgdof = self.vectorspace.number_of_global_dofs()
        gdof = tgdof + vgdof

        start = timer()
        print("Construting linear system ......!")
        self.M, self.B = self.get_left_matrix()
        S = self.B@spdiags(1/self.D, 0, tgdof, tgdof)@self.B.transpose()
        self.SL = tril(S).tocsc()
        self.SU = triu(S, k=1).tocsr()

        self.SUT = self.SL.transpose().tocsr()
        self.SLT = self.SU.transpose().tocsr()

        b = self.get_right_vector()

        AA = bmat([[self.M, self.B.transpose()], [self.B, None]]).tocsr()
        bb = np.r_[np.zeros(tgdof), b]
        end = timer()
        print("Construct linear system time:", end - start)


        start = timer()
        P = LinearOperator((gdof, gdof), matvec=self.linear_operator)
        x, exitCode = gmres(AA, bb, M=P, tol=1e-8)
        print(exitCode)
        end = timer()

        print("Solve time:", end-start)
        self.sh[:] = x[0:tgdof]
        self.uh[:] = x[tgdof:]
Example #36
def _lower_neighbors(dist_mat, max_scale):
    """
    Converts a distance matrix to neighbor information.

    Takes a square, possibly lower triangular, distance matrix and returns a
    list of lists of neighbor indices, for neighbors up to the specified scale.

    Parameters
    ----------
    dist_mat: 2D array
        the distance matrix, which may be lower triangular
    max_scale: float
        the highest scale (distance) to consider

    Returns
    -------
    neighbors: list of lists of int
    """
    d = sp.lil_matrix(dist_mat)
    d[d == 0] = sys.float_info.epsilon
    d[np.diag_indices(d.shape[0])] = 0
    d[d > max_scale] = 0
    d = sp.tril(d)
    result = [[] for i in range(d.shape[0])]
    for k, v in np.transpose(d.nonzero()):
        result[k].append(v)
    return result
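A tiny worked example with four points on a line (assuming the module-level sp/sys imports the function relies on); with max_scale=2 the pairs at distances 3 and 4 are excluded:

import numpy as np

pts = np.array([0., 1., 2., 4.])
dist_mat = np.abs(pts[:, None] - pts[None, :])
print(_lower_neighbors(dist_mat, max_scale=2.0))
# [[], [0], [0, 1], [2]]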
Example #37
 def nbr_idx_pairs(self):
     """
     Returns a 2d array with 2 columns. Each row contains the node idxs of
     a pair of neighbors in the graph. Each pair is only returned once, so
     for example only one of (0,3) and (3,0) could appear as rows.
     """
     return np.argwhere(sparse.tril(self.is_nbr))
Example #38
 def setUp(self):
     n = 50
     nrhs = 20
     self.A = sp.rand(n, n, 0.4) + sp.identity(n)
     self.sol = np.ones((n, nrhs))
     self.rhsU = sp.triu(self.A) * self.sol
     self.rhsL = sp.tril(self.A) * self.sol
Example #39
 def get_Qc(self,x0):
     if hasattr(self.F, 'shape'): 
         FT = self.F.T
     else:
         FT = self.F
     Q =  sparse.tril(FT * self.P * self.F)
     c = FT*(self.q + self.P*x0)
     return Q,c
Example #40
def is_tri(X):
    diag = X.diagonal().sum()
    if sparse.issparse(X):
        # triangular iff either strict triangle sums to zero
        return (not (sparse.tril(X).sum() - diag) or
                not (sparse.triu(X).sum() - diag))
    # dense case: check the strict triangles directly
    return bool(not np.triu(X, 1).sum() or not np.tril(X, -1).sum())
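For instance (hypothetical quick check):

import numpy as np
from scipy import sparse

print(is_tri(sparse.tril(sparse.random(5, 5, 0.5, random_state=0))))  # True
print(is_tri(np.ones((4, 4))))                                        # False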
Example #41
    def _h(self, x, lagrange, obj_factor, flag, user_data=None):
        if flag:
#            return (self._Hrow, self._Hcol)
            return (self._Hcol, self._Hrow)
        else:
            neqnln = user_data["neqnln"]
            niqnln = user_data["niqnln"]
            lmbda = {"eqnonlin": lagrange[:neqnln],
                     "ineqnonlin": lagrange[neqnln:neqnln + niqnln]}
            H = tril(self._hessfcn(x, lmbda), format="coo")
            return H.data
Example #42
    def triangular_lower(self, k=0):
        """
        Returns the lower triangular portion of this matrix.
        :param k:
            - k = 0 corresponds to the main diagonal
            - k > 0 is above the main diagonal
            - k < 0 is below the main diagonal

        TODO: Add unit tests
        """
        return self._new_instance(sp.tril(self.matrix, k=k))
Example #43
def gseidel(A, b, x, maxit=1000, tol=10e-13, relax=1., normalizer=None):
    """ Gauss-Jacobi iterative linear solver for sparse matrices

    Parameters
    ------------
    A : sparse matrix, shape (n, n)
        Left hand side of linear system.
    b : array, array (n, )
        Right hand side of linear system.
    x : array, array (n, )
        On entry, `x` holds the initial guess. On exit `x` holds the final solution.
    tol : float, optional
        Requested error tolerance for convergence.
    maxit : int, optional 
        Maximum number of iterations.
    relax : float, optional
        Relaxation parameter. Default is 1 (plain Gauss-Seidel). Set to
        values less than or greater than 1 for under- or over-relaxation.

    Returns
    -----------
    info : int
        Exit status. 0 if converged. -1 if it did not.
    iter : int
        Number of iterations
    relres : float
        total variance norm of the final solution.

    See Also
    ---------
    gjacobi, sparse_power_iteration


    Notes
    --------

    Code based on gseidel in the compecon Matlab toolbox.

    """
    Q = sparse.tril(A).tocsr()
    info = -1
    for iter in range(maxit):
        print(iter)
        dx = spla.spsolve(Q, b - A.dot(x))
        x += dx * relax
        if normalizer:
            normalizer(x)
        relres = tvnorm(dx)
        print(relres, tol)
        if relres < tol:
            info = 0
            break
    iter += 1
    return (info, iter, relres)
Example #44
def test_sparse_cholesky(A):
    # Perform decompositions
    np_chlsky = np.linalg.cholesky(A)

    L = tril(csr_matrix(np_chlsky), format='csr')
    # Check L is actually sparse!
    assert len(L.data) < np.prod(A.shape)
    assert issparse(L)
    # Test sparse implementation
    py_chlsky = sparse_ldl.cholesky(A, L.indptr, L.indices)

    # Check implementation of Cholesky above is the same as numpy
    np.testing.assert_allclose(L.data, py_chlsky)
Example #45
    def __init__(self, A, *args, **kwargs):
        """Initialization routine for the smoother

        Args:
            A (scipy.sparse.csc_matrix): sparse matrix A of the system to solve
            *args: Variable length argument list
            **kwargs: Arbitrary keyword arguments
        """
        super(GaussSeidel, self).__init__(A, *args, **kwargs)

        self.P = sp.tril(self.A, format='csc')
        # precompute inverse of the preconditioner for later usage
        self.Pinv = sp.linalg.inv(self.P)
Example #46
def test_sparse_modified_ldl(m, n):
    A = sparse_rand(m, n, density=0.025).toarray()
    A = np.dot(A, A.T)
    # Perform decompositions
    np_chlsky = cholesky(A)
    D, L = modified_ldl(A)
    py_chlsky = L*np.sqrt(D)

    L = tril(csr_matrix(L), format='csr')
    # Check L is actually sparse!
    assert len(L.data) < np.prod(A.shape)
    assert issparse(L)

    spD, spL = sparse_ldl.modified_ldl(A, L.indptr, L.indices)
    # Check implementation of Cholesky above is the same as numpy
    np.testing.assert_allclose(spD, D)
    np.testing.assert_allclose(spL, L.data, atol=1e-7)
Example #47
  def __init__(self, relaxer_factory, fine_level, aggregate_index):
    # Galerkin coarsening
    self.P = util.caliber_one_interpolation(aggregate_index)
    self.R = np.transpose(self.P)
    nc = self.R.shape[0]
    inv_agg_size = 1./np.sum(self.R, axis=1).flatten()
    self.T = spdiags(inv_agg_size, 0, nc, nc) * self.R
    self.A = self.R * fine_level.A * self.P
    super(_CoarseLevel, self).__init__(relaxer_factory)

    self.fine_level = fine_level
    self.aggregate_index = aggregate_index

    # Recover the graph and adjacency matrix from A.
    # TODO: replace building W by the proper sub-edgelist of A (more efficient?)
    self.W = -tril(self.A, k=-1) - triu(self.A, k=1)
    self.g = nx.from_scipy_sparse_matrix(self.W)
Example #48
def test_1d_fused_lasso(n=100):

    l1 = 1.


    sparsity1 = l1norm(n, l=l1)
    D = (np.identity(n) - np.diag(np.ones(n-1),-1))[1:]
    extra = np.zeros(n)
    extra[0] = 1.
    D = np.vstack([D,extra])
    D = sparse.csr_matrix(D)

    fused = seminorm(l1norm(D, l=l1))

    X = np.random.standard_normal((2*n,n))
    Y = np.random.standard_normal((2*n,))
    regloss = squaredloss(X,Y)
    p=regloss.add_seminorm(fused)
    solver=FISTA(p)
    solver.debug = True
    vals1 = solver.fit(max_its=25000, tol=1e-12)
    soln1 = solver.problem.coefs

    B = np.array(sparse.tril(np.ones((n,n))).todense())
    X2 = np.dot(X,B)

    time.sleep(3)
    
    D2 = np.diag(np.ones(n))
    p2 = lasso.gengrad((X2, Y))
    p2.assign_penalty(l1=l1)
    opt = FISTA(p2)
    opt.debug = True
    opt.fit(tol=1e-12,max_its=25000)
    beta = opt.problem.coefs
    soln2 = np.dot(B,beta)

    print(soln1[range(10)])
    print(soln2[range(10)])
    print(p.obj(soln1), p.obj(soln2))
    #np.testing.assert_almost_equal(soln1,soln2)

    return vals1
Example #49
    def _solve(self, x0, A, l, u, xmin, xmax):
        """ Solves using the Interior Point OPTimizer.
        """
        # Indexes of constrained lines.
        il = [i for i,ln in enumerate(self._ln) if 0.0 < ln.rate_a < 1e10]
        nl2 = len(il)

        neqnln = 2 * self._nb # no. of non-linear equality constraints
        niqnln = 2 * len(il)  # no. of lines with constraints

        user_data = {"A": A, "neqnln": neqnln, "niqnln": niqnln}

        self._f(x0)
        Jdata = self._dg(x0, False, user_data)
#        Hdata = self._h(x0, ones(neqnln + niqnln), None, False, user_data)

        lmbda = {"eqnonlin": ones(neqnln),
                 "ineqnonlin": ones(niqnln)}
        H = tril(self._hessfcn(x0, lmbda), format="coo")
        self._Hrow, self._Hcol = H.row, H.col

        n = len(x0) # the number of variables
        xl = xmin
        xu = xmax
        gl = r_[zeros(2 * self._nb), -Inf * ones(2 * nl2), l]
        gu = r_[zeros(2 * self._nb),       zeros(2 * nl2), u]
        m = len(gl) # the number of constraints
        nnzj = len(Jdata) # the number of nonzeros in Jacobian matrix
        nnzh = 0#len(H.data) # the number of non-zeros in Hessian matrix

        f_fcn, df_fcn, g_fcn, dg_fcn, h_fcn = \
            self._f, self._df, self._g, self._dg, self._h

        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             f_fcn, df_fcn, g_fcn, dg_fcn)#, h_fcn)

#        print dir(nlp)
#        nlp.str_option("print_options_documentation", "yes")
#        nlp.int_option("max_iter", 10)

#        x, zl, zu, obj = nlp.solve(x0)
        success = nlp.solve(x0, user_data)
        nlp.close()
Example #50
def GaussSeidel(A, b, MAXITER, TOLL):
    n = len(b)
    xk = np.ones(shape = n,dtype = float)
     
    D = sparse.diags(A.diagonal(), 0, format = 'csc',)
    L = sparse.tril(A, -1, format = 'csc')  # strictly lower part
    U = sparse.triu(A, 1, format = 'csc')   # strictly upper part
    
    T = -(linalg.inv(D+L))* U
    c = (linalg.inv(D+L))* b
    
    i = 0
    err = TOLL + 1
    while i < MAXITER and err > TOLL:
        x = T*xk + c
        err = np.linalg.norm(x-xk, 1)/np.linalg.norm(x,1)
        xk = x
        i += 1

    return xk, i
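A hypothetical run on a diagonally dominant tridiagonal system (with the sparse/linalg imports the function uses):

import numpy as np
from scipy import sparse
from scipy.sparse import linalg

n = 100
A = sparse.diags([-1., 4., -1.], [-1, 0, 1], shape=(n, n), format='csc')
b = np.ones(n)
x, iters = GaussSeidel(A, b, MAXITER=500, TOLL=1e-10)
print(iters, np.linalg.norm(A.dot(x) - b))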
Example #51
    def __init__(self, block_problem, set_tril=[0], *args, **kwargs):
        """Initialization routine for the smoother

        Args:
            block_problem (scipy.sparse.csc_matrix): A BlockProblem object
            *args: Variable length argument list
            **kwargs: Arbitrary keyword arguments
        """

        super(BlockGaussSeidel, self).__init__(block_problem, *args, **kwargs)

        assert len(set_tril) <= block_problem.n_layer, "set_tril has more entries than there are layers"
        assert min(set_tril) >= 0 and max(set_tril) < block_problem.n_layer, "set_tril contains an invalid layer index"

        As = block_problem.As
        for a_row in As:
            for i in set_tril:
                a_row[i] = sp.tril(a_row[i], format='csc')
        self.P = BlockProblemBase.generate_A(As)
        self.Pinv = sp.linalg.inv(self.P)
Example #52
def test_create_hint_mask_matrix(tensor, symmetric=False, keepConnections=False):

    # use only atom to atom indices for hint connection prediction
    atom_indices = np.zeros(tensor.getMatrixShape()[0])
    atom_indices[tensor.getAtomIndices()] = 1
    atom_vector = atom_indices[np.newaxis]
    atom_vector = lil_matrix(atom_vector)
    mask_matrix = atom_vector.multiply(atom_vector.T).tolil()
    mask_matrix.setdiag(0)

    # optionally exclude already existing connections from prediction
    if not keepConnections:
        connection_array = np.asarray(tensor.getSliceMatrix(SparseTensor.CONNECTION_SLICE).toarray())
        connection_indices = connection_array > 0.0
        mask_matrix[connection_indices] = 0

    # symmetric mask needed?
    if not symmetric:
        mask_matrix = tril(mask_matrix)

    return mask_matrix
Example #53
def is_symmetric_sparse(mat):
    from pyomo.contrib.pynumero.sparse.block_matrix import BlockMatrix
    # Note: this check is expensive
    flag = False
    if isinstance(mat, np.ndarray):
        flag = is_symmetric_dense(mat)
    elif isscalarlike(mat):
        flag = True
    elif isspmatrix(mat) or isinstance(mat, BlockMatrix):
        if mat.shape[0] != mat.shape[1]:
            flag = False
        else:
            if isinstance(mat, BlockMatrix):
                mat = mat.tocoo()
            # get upper and lower triangular
            l = tril(mat)
            u = triu(mat)
            diff = l - u.transpose()
            z = np.zeros(diff.nnz)
            flag = np.allclose(diff.data, z, atol=1e-6)
    else:
        raise RuntimeError("Format not recognized {}".format(type(mat)))
    return flag
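For example (hypothetical check):

import numpy as np
from scipy.sparse import coo_matrix

S = coo_matrix(np.array([[2., 1.], [1., 3.]]))
T = coo_matrix(np.array([[2., 1.], [0., 3.]]))
print(is_symmetric_sparse(S))  # True
print(is_symmetric_sparse(T))  # False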
Example #54
def test_1d_fused_lasso():

    """
    Check the 1d fused lasso solution using an equivalent lasso formulation
    """

    n = 100
    l1 = 1.
    
    D = (np.identity(n) - np.diag(np.ones(n-1),-1))[1:]
    extra = np.zeros(n)
    extra[0] = 1.
    D = np.vstack([D,extra])
    D = sparse.csr_matrix(D)

    fused = R.l1norm.linear(D, lagrange=l1)
    
    X = np.random.standard_normal((2*n,n))
    Y = np.random.standard_normal((2*n,))
    loss = R.quadratic.affine(X, -Y, coef=0.5)
    fused_lasso = R.container(loss, fused)
    solver=R.FISTA(fused_lasso)
    vals1 = solver.fit(max_its=25000, tol=1e-10)
    soln1 = solver.composite.coefs
    
    B = np.array(sparse.tril(np.ones((n,n))).todense())
    X2 = np.dot(X,B)
    
    loss = R.quadratic.affine(X2, -Y, coef=0.5)
    sparsity = R.l1norm(n, lagrange=l1)
    lasso = R.container(loss, sparsity)
    solver = R.FISTA(lasso)
    solver.fit(tol=1e-10)

    soln2 = np.dot(B, solver.composite.coefs)

    npt.assert_array_almost_equal(soln1, soln2, 3)
Example #55
def test_1d_fused_lasso(n=100):

    l1 = 1.

    sparsity1 = R.l1norm(n, lagrange=l1)
    D = (np.identity(n) - np.diag(np.ones(n-1),-1))[1:]
    extra = np.zeros(n)
    extra[0] = 1.
    D = np.vstack([D,extra])
    D = sparse.csr_matrix(D)

    fused = R.l1norm.linear(D, lagrange=l1)

    X = np.random.standard_normal((2*n,n))
    Y = np.random.standard_normal((2*n,))
    loss = R.quadratic.affine(X, -Y, coef=0.5)
    fused_lasso = R.container(loss, fused)
    solver=R.FISTA(fused_lasso)
    solver.debug = True
    vals1 = solver.fit(max_its=25000, tol=1e-12)
    soln1 = solver.composite.coefs

    B = np.array(sparse.tril(np.ones((n,n))).todense())
    X2 = np.dot(X,B)
Example #56
def adj2vec(G):
    r"""
    Prepare the graph for the gradient computation.

    Parameters
    ----------
    G : Graph structure

    """
    if not hasattr(G, 'directed'):
        G.is_directed()

    if G.directed:
        raise NotImplementedError("Not implemented yet.")

    else:
        v_i, v_j = (sparse.tril(G.W)).nonzero()
        weights = G.W[v_i, v_j]

        # TODO G.ind_edges = sub2ind(size(G.W), G.v_in, G.v_out)
        G.v_in = v_i
        G.v_out = v_j
        G.weights = weights
        G.Ne = np.shape(v_i)[0]
Example #57
def basic(A,pname=None,omega=1.0):
    """ Jacobi, Gauss-Seidel, SOR and their symmetric versions

    J, GS, SGS, SOR, SSOR
    """
    n = A.shape[0]
    
    if pname is None:
        I    = eye(n,n).tocsr()
        L = I
        U = I
    elif pname=='J':
        # weighted Jacobi
        I = eye(n,n).tocsr()
        D = spdiags(A.diagonal(),0,n,n).tocsr()
        #
        L = omega * D
        U = I
    elif pname=='GS':
        # Gauss Seidel
        I = eye(n,n).tocsr()
        D = spdiags(A.diagonal(),0,n,n).tocsr()
        E = tril(A,-1).tocsr()
        #
        L = D + E 
        U = I
    elif pname=='SGS':
        # Symmetric Gauss-Seidel
        D = spdiags(A.diagonal(),0,n,n).tocsr()
        Dinv =  spdiags(1/A.diagonal(),0,n,n).tocsr()
        E = tril(A,-1).tocsr()
        F = triu(A,1).tocsr()
        #
        L = (D+E)*Dinv
        U = D+F
    elif pname=='SOR':
        # Successive Overrelaxation
        I = eye(n,n).tocsr()
        D = spdiags(A.diagonal(),0,n,n).tocsr()
        E = tril(A,-1).tocsr()
        #
        L = D + omega * E 
        U = I
    elif pname=='SSOR':
        # Symmetric Successive Overrelaxation
        D = spdiags(A.diagonal(),0,n,n).tocsr()
        Dinv =  spdiags(1/A.diagonal(),0,n,n).tocsr()
        E = tril(A,-1).tocsr()
        F = triu(A,1).tocsr()
        #
        L = (D+omega*E)*Dinv
        U = D+omega*F
    else:
        print ">>>>>>>>>>>>>    Problem with the preconditioner name..."
        I    = eye(n,n).tocsr()
        L = I
        U = I
    
    L.sort_indices()
    U.sort_indices()
    return preconditioner_matvec(L,U)
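preconditioner_matvec itself is not shown here. Since every branch builds a lower factor L and an upper factor U with M = L*U, one plausible sketch returning a SciPy LinearOperator is the following (an assumption about the helper, not its actual code):

from scipy.sparse.linalg import LinearOperator, spsolve_triangular

def preconditioner_matvec(L, U):
    # apply M^{-1} r = U^{-1} (L^{-1} r) for the splitting M = L U
    n = L.shape[0]
    def matvec(r):
        y = spsolve_triangular(L, r, lower=True)
        return spsolve_triangular(U, y, lower=False)
    return LinearOperator((n, n), matvec=matvec)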
Example #58
def graph_sparsify(M, epsilon, maxiter=10):
    r"""
    Sparsify a graph using Spielman-Srivastava algorithm.

    Parameters
    ----------
    M : Graph or sparse matrix
        Graph structure or a Laplacian matrix
    epsilon : float
        Sparsification parameter

    Returns
    -------
    Mnew : Graph or sparse matrix
        New graph structure or sparse matrix

    Note
    ----
    Epsilon should be between 1/sqrt(N) and 1

    Examples
    --------
    >>> from pygsp import graphs, operators
    >>> G = graphs.Sensor(256, Nc=20, distribute=True)
    >>> epsilon = 0.4
    >>> G2 = operators.graph_sparsify(G, epsilon)

    Reference
    ---------
    See :cite: `spielman2011graph` `rudelson1999random` `rudelson2007sampling`
    for more information.

    """
    # Test the input parameters
    if isinstance(M, Graph):
        if not M.lap_type == 'combinatorial':
            raise NotImplementedError
        L = M.L
    else:
        L = M

    N = np.shape(L)[0]

    if not 1./np.sqrt(N) <= epsilon < 1:
        raise ValueError('GRAPH_SPARSIFY: Epsilon out of required range')

    # not sparse
    resistance_distances = resistance_distance(L).toarray()
    # Get the Weight matrix
    if isinstance(M, Graph):
        W = M.W
    else:
        W = np.diag(L.diagonal()) - L.toarray()
        W[W < 1e-10] = 0

    W = sparse.csc_matrix(W)

    start_nodes, end_nodes, weights = sparse.find(sparse.tril(W))

    # Calculate the new weights.
    weights = np.maximum(0, weights)
    Re = np.maximum(0, resistance_distances[start_nodes, end_nodes])
    Pe = weights * Re
    Pe = Pe / np.sum(Pe)

    for i in range(maxiter):
        # Rudelson, 1996 Random Vectors in the Isotropic Position
        # (too hard to figure out actual C0)
        C0 = 1 / 30.
        # Rudelson and Vershynin, 2007, Thm. 3.1
        C = 4 * C0
        q = round(N * np.log(N) * 9 * C**2 / (epsilon**2))

        results = stats.rv_discrete(values=(np.arange(np.shape(Pe)[0]), Pe)).rvs(size=q)
        # scipy.stats.itemfreq was removed; np.unique is the documented replacement
        values_, counts_ = np.unique(results, return_counts=True)
        spin_counts = np.stack([values_, counts_], axis=1).astype(int)
        per_spin_weights = weights / (q * Pe)

        counts = np.zeros(np.shape(weights)[0])
        counts[spin_counts[:, 0]] = spin_counts[:, 1]
        new_weights = counts * per_spin_weights

        sparserW = sparse.csc_matrix((new_weights, (start_nodes, end_nodes)),
                                     shape=(N, N))
        sparserW = sparserW + sparserW.T
        sparserL = sparse.diags(sparserW.diagonal(), 0) - sparserW

        if Graph(W=sparserW).is_connected():
            break
        elif i == maxiter - 1:
            logger.warning('Despite attempts to reduce epsilon, sparsified graph is disconnected')
        else:
            epsilon -= (epsilon - 1/np.sqrt(N)) / 2.

    if isinstance(M, Graph):
        sparserW = sparse.diags(sparserL.diagonal(), 0) - sparserL
        if not M.directed:
            sparserW = (sparserW + sparserW.T) / 2.

        Mnew = Graph(W=sparserW)
        M.copy_graph_attributes(Mnew)
    else:
        Mnew = sparse.lil_matrix(sparserL)

    return Mnew
Example #59
def cluster(data, k=30, directed=False, prune=False, min_cluster_size=10, jaccard=True,
            primary_metric='euclidean', n_jobs=-1, q_tol=1e-3, louvain_time_limit=2000,
            nn_method='kdtree'):
    """
    PhenoGraph clustering

    :param data: Numpy ndarray of data to cluster, or sparse matrix of k-nearest neighbor graph
        If ndarray, n-by-d array of n cells in d dimensions
        If sparse matrix, n-by-n adjacency matrix
    :param k: Number of nearest neighbors to use in first step of graph construction
    :param directed: Whether to use a symmetric (default) or asymmetric ("directed") graph
        The graph construction process produces a directed graph, which is symmetrized by one of two methods (see below)
    :param prune: Whether to symmetrize by taking the average (prune=False) or product (prune=True) between the graph
        and its transpose
    :param min_cluster_size: Cells that end up in a cluster smaller than min_cluster_size are considered outliers
        and are assigned to -1 in the cluster labels
    :param jaccard: If True, use Jaccard metric between k-neighborhoods to build graph.
        If False, use a Gaussian kernel.
    :param primary_metric: Distance metric to define nearest neighbors.
        Options include: {'euclidean', 'manhattan', 'correlation', 'cosine'}
        Note that performance will be slower for correlation and cosine.
    :param n_jobs: Nearest Neighbors and Jaccard coefficients will be computed in parallel using n_jobs. If n_jobs=-1,
        the number of jobs is determined automatically
    :param q_tol: Tolerance (i.e., precision) for monitoring modularity optimization
    :param louvain_time_limit: Maximum number of seconds to run modularity optimization. If exceeded
        the best result so far is returned
    :param nn_method: Whether to use brute force or kdtree for nearest neighbor search. For very large high-dimensional
        data sets, brute force (with parallel computation) performs faster than kdtree.

    :return communities: numpy integer array of community assignments for each row in data
    :return graph: numpy sparse array of the graph that was used for clustering
    :return Q: the modularity score for communities on graph
    """

    # NB if prune=True, graph must be undirected, and the prune setting takes precedence
    if prune:
        print("Setting directed=False because prune=True")
        directed = False

    if n_jobs == 1:
        kernel = jaccard_kernel
    else:
        kernel = parallel_jaccard_kernel
    kernelargs = {}

    # Start timer
    tic = time.time()
    # Go!
    if isinstance(data, sp.spmatrix) and data.shape[0] == data.shape[1]:
        print("Using neighbor information from provided graph, rather than computing neighbors directly", flush=True)
        lilmatrix = data.tolil()
        d = np.vstack(lilmatrix.data).astype('float32')  # distances
        idx = np.vstack(lilmatrix.rows).astype('int32')  # neighbor indices by row
        del lilmatrix
        assert idx.shape[0] == data.shape[0]
        k = idx.shape[1]
    else:
        d, idx = find_neighbors(data, k=k, metric=primary_metric, method=nn_method, n_jobs=n_jobs)
        print("Neighbors computed in {} seconds".format(time.time() - tic), flush=True)

    subtic = time.time()
    kernelargs['idx'] = idx
    # if not using jaccard kernel, use gaussian
    if not jaccard:
        kernelargs['d'] = d
        kernelargs['sigma'] = 1.
        kernel = gaussian_kernel
        graph = neighbor_graph(kernel, kernelargs)
        print("Gaussian kernel graph constructed in {} seconds".format(time.time() - subtic), flush=True)
    else:
        del d
        graph = neighbor_graph(kernel, kernelargs)
        print("Jaccard graph constructed in {} seconds".format(time.time() - subtic), flush=True)
    if not directed:
        if not prune:
            # symmetrize graph by averaging with transpose
            sg = (graph + graph.transpose()).multiply(.5)
        else:
            # symmetrize graph by multiplying with transpose
            sg = graph.multiply(graph.transpose())
        # retain lower triangle (for efficiency)
        graph = sp.tril(sg, -1)
    # write to file with unique id
    uid = uuid.uuid1().hex
    graph2binary(uid, graph)
    communities, Q = runlouvain(uid, tol=q_tol, time_limit=louvain_time_limit)
    print("PhenoGraph complete in {} seconds".format(time.time() - tic), flush=True)
    communities = sort_by_size(communities, min_cluster_size)
    # clean up
    for f in os.listdir():
        if re.search(uid, f):
            os.remove(f)

    return communities, graph, Q
Example #60
    def _solveM(self, rhs):
        # backward substitution on an upper triangular CSR matrix; in
        # canonical CSR order the diagonal is the first stored entry per row
        vals = self.A.data
        rowptr = self.A.indptr
        colind = self.A.indices
        x = np.empty_like(rhs)
        for i in reversed(range(self.A.shape[0])):
            ith_row = vals[rowptr[i] : rowptr[i+1]]
            cols = colind[rowptr[i] : rowptr[i+1]]
            x_vals = x[cols]
            x[i] = (rhs[i] - np.dot(ith_row[1:], x_vals[1:])) / ith_row[0]
        return x

    _solve1 = _solveM


if __name__ == '__main__':
    TOL = 1e-12
    n = 30
    A = sp.rand(n, n, 0.4) + sp.identity(n)
    AL = sp.tril(A)
    ALinv = ForwardSolver(AL)
    e = np.ones((n,5))
    rhs = AL * e
    x = ALinv * rhs
    print(np.linalg.norm(e-x,np.inf), TOL)

    AU = sp.triu(A)
    AUinv = BackwardSolver(AU)
    e = np.ones((n,5))
    rhs = AU * e
    x = AUinv * rhs
    print(np.linalg.norm(e-x,np.inf), TOL)