Example #1
def test_graph_reverse_cuthill_mckee():
    A = np.array(
        [
            [1, 0, 0, 0, 1, 0, 0, 0],
            [0, 1, 1, 0, 0, 1, 0, 1],
            [0, 1, 1, 0, 1, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 1, 0],
            [1, 0, 1, 0, 1, 0, 0, 0],
            [0, 1, 0, 0, 0, 1, 0, 1],
            [0, 0, 0, 1, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 1, 0, 1],
        ],
        dtype=int,
    )

    graph = csr_matrix(A)
    perm = reverse_cuthill_mckee(graph)
    correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0])
    assert_equal(perm, correct_perm)

    # Test int64 indices input
    graph.indices = graph.indices.astype("int64")
    graph.indptr = graph.indptr.astype("int64")
    perm = reverse_cuthill_mckee(graph, True)
    assert_equal(perm, correct_perm)
Example #2
def heuristica_bandwidth(file, symetric):
    """
    Description
    -----------
    Reduces the bandwidth of a large matrix with the RCM (Reverse Cuthill-McKee) heuristic.

    Parameters
    ----------
    file: name of the matrix file (extensions .mtx, .mtx.gz)
    symetric: True (or False) if the matrix is symmetric (or not)

    Returns
    -------
    The execution time of the bandwidth-reduction heuristic.

    For more details on the heuristic used here:
    --------------------------------------------------------------
    Gonzaga de Oliveira, Sanderson L., "INTRODUÇÃO A HEURÍSTICAS PARA REDUÇÃO DE LARGURA DE BANDA DE MATRIZES",
    Notas em Matemática Aplicada, Volume 75, (2014). E-ISSN 2236-5915.

    To obtain more matrices:
    --------------------------
    https://sparse.tamu.edu
    """

    # Load the Matrix Market file
    matriz = mmread(file)

    # Convert the sparse matrix to CSR format
    G_sparse = csr_matrix(matriz)

    print(" Aplicando a Heuristica REVERSE-CUTHIL-MCKEE")
    t1 = time.time()
    print("\tInicio\t- ", date.datetime.fromtimestamp(t1))
    reverse_cuthill_mckee(G_sparse, symetric)
    t2 = time.time()
    print("\tTermino\t- ", date.datetime.fromtimestamp(t2))
    print(" Tempo de Execucao da Heuristica REVERSE-CUTHIL-MCKEE: ", t2 - t1)

    # Summary of the matrix dataset
    print(
        "\n-------------------------------------------------------------------------------------------------"
    )
    print(" [SUMARIO] - Apresentando a Matriz")
    print(
        "-------------------------------------------------------------------------------------------------\n"
    )
    print(" Dimensao (NxN): \t", matriz.shape)
    print(" Elementos NONZERO: \t", matriz.nnz)
    print(" Arquivo da Matriz: \t", file)

    if symetric is None or symetric is False:
        texto = NONSYMETRIC
    else:
        texto = YESSYMETRIC

    print(" Matriz Simétrica: \t", symetric, texto)

    return (t2 - t1)
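
A minimal usage sketch for the function above (an illustration, not part of the original module): the imports mirror what the code relies on, NONSYMETRIC/YESSYMETRIC stand in for the module-level summary labels, and the .mtx file name is a placeholder for any matrix downloaded from https://sparse.tamu.edu.

import time
import datetime as date
from scipy.io import mmread
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import reverse_cuthill_mckee

# placeholder values for the module-level summary labels
NONSYMETRIC = "(treated as non-symmetric)"
YESSYMETRIC = "(treated as symmetric)"

elapsed = heuristica_bandwidth("bcsstk01.mtx", True)  # illustrative file name
print("RCM heuristic took %.4f seconds" % elapsed)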
Example #3
    def salvar_permutacao_rcm(self):
        """Salva a permutação feita pelo algoritmo de redução da banda da matriz de rigidez Reverse Cuthill Mckee."""
        # Interface Julia
        julia = Main
        julia.eval('include("julia_core/Deslocamentos.jl")')

        julia.kelems = self.matrizes_rigidez_elementos_poligonais(
            self.dados) + self.matrizes_rigidez_barras()
        julia.gls_elementos = [
            i + 1 for i in self.dados.graus_liberdade_elementos
        ]
        julia.gls_estrutura = [
            i + 1 if i != -1 else i
            for i in self.dados.graus_liberdade_estrutura
        ]
        julia.apoios = self.dados.apoios + 1

        julia.eval(
            'dados = Dict("kelems" => kelems, "gls_elementos" => gls_elementos, '
            '"gls_estrutura" => gls_estrutura, "apoios" => apoios)')

        linhas, colunas, termos = julia.eval(
            'matriz_rigidez_estrutura(kelems, dados, true)')

        # Number of free degrees of freedom.
        ngl = self.dados.num_graus_liberdade() - len(self.dados.apoios)

        k = csr_matrix((termos, (linhas - 1, colunas - 1)), shape=(ngl, ngl))
        rcm = reverse_cuthill_mckee(k)

        # Save the rcm vector.
        self.dados.salvar_arquivo_numpy(rcm, 11)
Example #4
	def reduzir(self, matriz_nomeArquivo, simetrica=True):


		matriz = self.lerMatriz(matriz_nomeArquivo)
		
		# plot the matrix
		plt.rcParams['figure.figsize'] = (15,15)
		fig, axs = plt.subplots(1, 2)
		ax1 = axs[0]
		ax2 = axs[1]

		matriz_densa = matriz.todense()  # used for plotting and for computing the bandwidth

		ax1.spy(matriz_densa, markersize=1)
		ax1.set_title('Original Matrix', y=1.08)
		
		# according to the SciPy RCM documentation, this transformation is needed for non-symmetric matrices
		if not simetrica:
			matriz = matriz + matriz.T

		print("Largura de banda original",self.larguraBanda(matriz_densa))

		# permutation vector produced by the Reverse Cuthill-McKee algorithm, used to reorder the matrix
		perm_array = reverse_cuthill_mckee(matriz, symmetric_mode=True)

		# reorder the matrix
		matriz = self.reordenarMatriz(matriz,perm_array)

		matriz_densa = matriz.todense()  # used for plotting
		print("Reduced bandwidth", self.larguraBanda(matriz_densa))

		ax2.spy(matriz_densa, markersize=1)
		ax2.set_title('Reordered Matrix', y=1.08)
		plt.show()
Example #5
def rev_cuthill_mckee(C, return_order=True):
    rorder = reverse_cuthill_mckee(csr_matrix(C))

    if return_order:
        return C[np.ix_(rorder, rorder)], rorder
    else:
        return C[np.ix_(rorder, rorder)]
Example #6
def get_cuthill_mckee(weights):
    graph = csr_matrix(weights)
    order = reverse_cuthill_mckee(graph)
    permutation = [0 for i in range(len(weights))]
    for i in range(len(weights)):
        permutation[order[i]] = i
    return permutation
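
The loop above inverts the RCM ordering, so that permutation[v] is the new position of the original node v. Since the argsort of a permutation array is its inverse, an equivalent version (the function name below is made up for illustration) can be written as:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import reverse_cuthill_mckee

def get_cuthill_mckee_argsort(weights):
    order = reverse_cuthill_mckee(csr_matrix(weights))
    # argsort of a permutation array yields its inverse permutation
    return np.argsort(order).tolist()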
Example #7
def find_permutation(graph, method, verbose=True):
	n = len(graph)
	if verbose:
		identity_i2p = {i : i for i in range(n)}
		bandwidth_initial = score(graph, 'max', identity_i2p)
		MLA_initial = score(graph, 'sum', identity_i2p)
		x = list(range(n))
		random.shuffle(x)
		random_i2p = {i : v for i, v in enumerate(x)}
		bandwidth_random = score(graph, 'max', random_i2p)
		MLA_random = score(graph, 'sum', random_i2p)
	if method == 'max':
		x = np.zeros((n,n))
		for i in range(n):
			for j in graph[i]['neighbors']:
				x[i][j] = 1
				x[j][i] = 1
		csr = csr_matrix(x)
		permutation = reverse_cuthill_mckee(csr, symmetric_mode=True)
		I, J = np.ix_(permutation, permutation)
		i2p = {v : i for i,v in enumerate(permutation)}
		x2 = x[I,J]
		permutation = list(permutation)
	else:
		i2p, permutation = MLA(graph, n)
	if verbose:
		bandwidth_final = score(graph, 'max', i2p)
		MLA_final = score(graph, 'sum', i2p)
		return i2p, permutation, {'Initial bandwidth' : bandwidth_initial, 'Random bandwidth' : bandwidth_random, 'Final bandwidth' : bandwidth_final, 
		'Initial MLA' : MLA_initial, 'Random MLA' : MLA_random, 'Final MLA' : MLA_final}
	else:
		return i2p, permutation
Example #8
def transform_to_band(A, B, alg=False):
    if not alg:
        return A.A.tolist(), list(chain.from_iterable(B.A.tolist()))
    # res = A @ B
    # print("A=", A.A)
    # print("b=", B.A)
    #
    # print()

    a = csr_matrix(A)
    p = reverse_cuthill_mckee(a, symmetric_mode=True)
    band_a = A[np.ix_(p, p)]
    band_b = csr_matrix(B.A[p, :])
    # print("p=", p)

    # band_result = band_a @ band_b
    # print("band_a=", band_a.toarray())
    # print("band_b=", band_b.toarray())

    # print(are_equal(band_result.toarray(), res.A))
    # print("a*b=", band_result.toarray())

    # print("A*b=", res.A)
    # print("nz", band_a.nnz)
    # plt.spy(A.A, markersize=marksize)
    # plt.show()
    # plt.spy(band_a.toarray(), markersize=marksize)
    # plt.show()
    return band_a.toarray().tolist(), list(chain.from_iterable(band_b.toarray().tolist()))
Example #9
    def _best_subset(self, num_qubits, num_meas, num_cx):
        """Computes the qubit mapping with the best connectivity.

        Args:
            num_qubits (int): Number of subset qubits to consider.

        Returns:
            ndarray: Array of qubits to use for best connectivity mapping.
        """
        from scipy.sparse import coo_matrix, csgraph

        if num_qubits == 1:
            return np.array([0])
        if num_qubits == 0:
            return []

        rows, cols, best_map = best_subset(
            num_qubits,
            self.adjacency_matrix,
            num_meas,
            num_cx,
            self._use_error,
            self.coupling_map.is_symmetric,
            self.error_mat,
        )
        data = [1] * len(rows)
        sp_sub_graph = coo_matrix((data, (rows, cols)),
                                  shape=(num_qubits, num_qubits)).tocsr()
        perm = csgraph.reverse_cuthill_mckee(sp_sub_graph)
        best_map = best_map[perm]
        return best_map
Example #10
def rcm_reorder(adj):
    #---graph reordering using RCM---
    node_count = adj.shape[0]
    reindex_reverse = np.array(list(reverse_cuthill_mckee(adj.tocsr(),symmetric_mode=True)))
    reindex = np.zeros((node_count,),int)
    for i in range(node_count):
        reindex[reindex_reverse[i]] = i
    return reindex
Example #11
def test_graph_reverse_cuthill_mckee():
    A = np.array([[1, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1, 0, 1],
                  [0, 1, 1, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 1, 0],
                  [1, 0, 1, 0, 1, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0, 1],
                  [0, 0, 0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0, 1]],
                 dtype=int)

    graph = csr_matrix(A)
    perm = reverse_cuthill_mckee(graph)
    correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0])
    assert_equal(perm, correct_perm)

    # Test int64 indices input
    graph.indices = graph.indices.astype('int64')
    graph.indptr = graph.indptr.astype('int64')
    perm = reverse_cuthill_mckee(graph, True)
    assert_equal(perm, correct_perm)
Example #12
    def _best_subset(self, n_qubits):
        """Computes the qubit mapping with the best connectivity.

        Args:
            n_qubits (int): Number of subset qubits to consider.

        Returns:
            ndarray: Array of qubits to use for best connectivity mapping.
        """
        if n_qubits == 1:
            return np.array([0])

        device_qubits = self.coupling_map.size()

        cmap = np.asarray(self.coupling_map.get_edges())
        data = np.ones_like(cmap[:, 0])
        sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),
                                shape=(device_qubits, device_qubits)).tocsr()
        best = 0
        best_map = None
        # do bfs with each node as starting point
        for k in range(sp_cmap.shape[0]):
            bfs = cs.breadth_first_order(sp_cmap,
                                         i_start=k,
                                         directed=False,
                                         return_predecessors=False)

            connection_count = 0
            sub_graph = []
            for i in range(n_qubits):
                node_idx = bfs[i]
                for j in range(sp_cmap.indptr[node_idx],
                               sp_cmap.indptr[node_idx + 1]):
                    node = sp_cmap.indices[j]
                    for counter in range(n_qubits):
                        if node == bfs[counter]:
                            connection_count += 1
                            sub_graph.append([node_idx, node])
                            break

            if connection_count > best:
                best = connection_count
                best_map = bfs[0:n_qubits]
                # Return a best mapping that has reduced bandwidth
                mapping = {}
                for edge in range(best_map.shape[0]):
                    mapping[best_map[edge]] = edge
                new_cmap = [[mapping[c[0]], mapping[c[1]]] for c in sub_graph]
                rows = [edge[0] for edge in new_cmap]
                cols = [edge[1] for edge in new_cmap]
                data = [1] * len(rows)
                sp_sub_graph = sp.coo_matrix(
                    (data, (rows, cols)), shape=(n_qubits, n_qubits)).tocsr()
                perm = cs.reverse_cuthill_mckee(sp_sub_graph)
                best_map = best_map[perm]
        return best_map
Example #13
def compute_rcm_permutation(neighbor_list):
    """Computes the permutation that minimizes the bandwidth of the connectivity matrix
    - uses Reverse CutHill-McKee (RCM) algorithm, which needs CSC or CSR sparse matrix format"""

    A = convert_neighbor_list_to_compressed_matrix(neighbor_list, sparse.csr_matrix)

    # Note we are always dealing with undirected (symmetric) graphs
    permutation = graph.reverse_cuthill_mckee(A,symmetric_mode=True)

    print_bandwidth_before_after_permutation(A,permutation)

    return permutation.tolist()
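
For reference, a small self-contained sketch of the kind of before/after bandwidth check that print_bandwidth_before_after_permutation presumably performs; the bandwidth helper and the toy matrix below are illustrative, not the module's actual code:

import numpy as np
from scipy import sparse
from scipy.sparse.csgraph import reverse_cuthill_mckee

def bandwidth(A):
    # half-bandwidth: largest |row - col| over the stored nonzeros
    coo = sparse.coo_matrix(A)
    return int(np.abs(coo.row - coo.col).max()) if coo.nnz else 0

A = sparse.csr_matrix(np.array([[1, 0, 0, 1],
                                [0, 1, 1, 0],
                                [0, 1, 1, 0],
                                [1, 0, 0, 1]]))
perm = reverse_cuthill_mckee(A, symmetric_mode=True)
A_perm = A[perm, :][:, perm]
print(bandwidth(A), bandwidth(A_perm))  # bandwidth before vs. after RCM: 3 -> 1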
Example #14
    def stability(self, omega, J_z, it=None):
        """Calculate B, Hills matrix.

        The 2n eigenvalues of B with lowest imaginary part, is the estimated
        Floquet exponents. They are collected in B_tilde.

        Returns
        -------
        B: ndarray (2Nh+1)2n x (2Nh+1)2n
            Hills coefficients
        """
        scale_x = self.hb.scale_x
        scale_t = self.hb.scale_t
        M0 = self.hb.M * scale_t**2 / scale_x
        C0 = self.hb.C * scale_t / scale_x
        K0 = self.hb.K / scale_x

        n = self.hb.n
        rcm_permute = self.hb.rcm_permute
        NH = self.hb.NH
        nu = self.hb.nu

        Delta2 = self.Delta2
        b2_inv = self.b2_inv

        omega2 = omega / nu
        # eq. 38
        Delta1 = C0
        for i in range(1, NH + 1):
            blk = np.vstack((np.hstack((C0, -2 * i * omega2 / scale_t * M0)),
                             np.hstack((2 * i * omega2 / scale_t * M0, C0))))
            Delta1 = block_diag(Delta1, blk)

        # eq. 45
        A0 = J_z / scale_x
        A1 = Delta1
        A2 = Delta2
        b1 = np.vstack((np.hstack(
            (A1, A0)), np.hstack((-np.eye(A0.shape[0]), np.zeros(A0.shape)))))

        # eq. 46
        mat_B = b2_inv @ b1
        if rcm_permute:
            # permute B to get smaller bandwidth which gives faster linalg comp.
            p = reverse_cuthill_mckee(mat_B)
            B = mat_B[p]
        else:
            B = mat_B

        return B
Example #15
def upper_bound_rcm(matrix=csr_matrix):
    rcm = csgraph.reverse_cuthill_mckee(matrix, symmetric_mode=True)
    matrix_aux = copy.deepcopy(matrix)
    cont = 0
    list_aux = list(range(0, len(rcm)))
    rcm = list(rcm)
    rcm.pop()
    for x in rcm:
        matrix_aux = swap_indices(list_aux.index(x), list_aux.index(cont),
                                  matrix_aux)
        list_aux[list_aux.index(x)], list_aux[list_aux.index(cont)] = \
            list_aux[list_aux.index(cont)], list_aux[list_aux.index(x)]
        cont += 1

    return matrix_aux
Example #16
def reverseCuthillMckee(df):
    startTime = datetime.now()
    print("    Calculating reverse Cuthill-McKee ordering...")
    # We first change the DF to a np array
    df_np = df.to_numpy()
    # Which is then converted to a scipy CSR graph
    graph = csr_matrix(df_np)
    # On which the algorithm is run
    graph_ordered = reverse_cuthill_mckee(graph, symmetric_mode=False)
    # We return a list of the ordering (indices)

    endTime = datetime.now() - startTime
    print("    Done! (%s.%s seconds)" %
          (endTime.seconds, endTime.microseconds))

    return graph_ordered
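
A minimal usage sketch, assuming df is a square pandas DataFrame holding an adjacency-like matrix; the toy data and labels below are made up for illustration:

import numpy as np
import pandas as pd
from datetime import datetime
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import reverse_cuthill_mckee

labels = ["a", "b", "c", "d"]
df = pd.DataFrame(np.array([[0, 0, 0, 1],
                            [0, 0, 1, 0],
                            [0, 1, 0, 0],
                            [1, 0, 0, 0]]),
                  index=labels, columns=labels)
order = reverseCuthillMckee(df)
df_reordered = df.iloc[order, order]  # rows and columns in RCM order
print(df_reordered)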
Example #17
File: nodes.py Project: soltesz-lab/RBF
def neighbor_argsort(nodes, m=None):
  '''
  Returns a permutation array that sorts `nodes` so that each node and
  its `m` nearest neighbors are close together in memory. This is done
  through the use of a KD Tree and the Reverse Cuthill-McKee
  algorithm.

  Parameters
  ----------
  nodes : (n, d) float array
  
  m : int, optional
         
  Returns
  -------
  (N,) int array

  Examples
  --------
  >>> nodes = np.array([[0.0, 1.0],
                        [2.0, 1.0],
                        [1.0, 1.0]])
  >>> idx = neighbor_argsort(nodes, 2)
  >>> nodes[idx]
  array([[ 2.,  1.],
         [ 1.,  1.],
         [ 0.,  1.]])

  '''
  nodes = np.asarray(nodes, dtype=float)
  assert_shape(nodes, (None, None), 'nodes')
  
  if m is None:
    # this should be roughly equal to the stencil size for the RBF-FD
    # problem
    m = 5**nodes.shape[1]

  m = min(m, nodes.shape[0])
  # find the indices of the nearest m nodes for each node
  _, idx = KDTree(nodes).query(nodes, m)
  # efficiently form adjacency matrix
  col = idx.ravel()
  row = np.repeat(np.arange(nodes.shape[0]), m)
  data = np.ones(nodes.shape[0]*m, dtype=bool)
  mat = csc_matrix((data, (row, col)), dtype=bool)
  permutation = reverse_cuthill_mckee(mat)
  return permutation
Example #18
def test_graph_reverse_cuthill_mckee_ordering():
    data = np.ones(63, dtype=int)
    rows = np.array([
        0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
        6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11,
        11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 15
    ])
    cols = np.array([
        0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2, 7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7,
        13, 15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13, 1, 9, 11, 0, 2, 8, 10, 15,
        1, 3, 9, 11, 4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14, 5, 7, 10, 13, 15
    ])
    graph = coo_matrix((data, (rows, cols))).tocsr()
    perm = reverse_cuthill_mckee(graph)
    correct_perm = np.array(
        [12, 14, 4, 6, 10, 8, 2, 15, 0, 13, 7, 5, 9, 11, 1, 3])
    assert_equal(perm, correct_perm)
Example #19
File: nodes.py Project: whatyouknow123/RBF
def neighbor_argsort(nodes, m=10, vert=None, smp=None):
    ''' 
  Returns a permutation array that sorts *nodes* so that each node and 
  its *m* nearest neighbors are close together in memory. This is done 
  through the use of a KD Tree and the Reverse Cuthill-McKee 
  algorithm.

  Parameters
  ----------
  nodes : (N,D) array
    Node positions.

  m : int, optional
    Number of neighboring nodes to place close together in memory. 
    This should be about equal to the stencil size for RBF-FD method.

  Returns
  -------
  permutation: (N,) array 
    Sorting indices.

  Examples
  --------
  >>> nodes = np.array([[0.0,1.0],
                        [2.0,1.0],
                        [1.0,1.0]])
  >>> idx = neighbor_argsort(nodes,2)
  >>> nodes[idx]
  array([[ 2.,  1.],
         [ 1.,  1.],
         [ 0.,  1.]])

  '''
    nodes = np.asarray(nodes, dtype=float)
    m = min(m, nodes.shape[0])
    # find the indices of the nearest n nodes for each node
    idx, dist = neighbors(nodes, m, vert=vert, smp=smp)
    # efficiently form adjacency matrix
    col = idx.ravel()
    row = np.repeat(np.arange(nodes.shape[0]), m)
    data = np.ones(nodes.shape[0] * m, dtype=bool)
    mat = csr_matrix((data, (row, col)), dtype=bool)
    permutation = reverse_cuthill_mckee(mat)
    return permutation
Example #20
def test_graph_reverse_cuthill_mckee_ordering():
    data = np.ones(63,dtype=int)
    rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 
                2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
                6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
                9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 
                12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
                14, 15, 15, 15, 15, 15])
    cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
                7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13, 
                15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
                1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
                4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
                5, 7, 10, 13, 15])
    graph = coo_matrix((data, (rows,cols))).tocsr()
    perm = reverse_cuthill_mckee(graph)
    correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15,
                0, 13, 7, 5, 9, 11, 1, 3])
    assert_equal(perm, correct_perm)
Example #21
File: stats.py Project: Strabes/dsutils
def cramers_corrected_matrix(df, reorder_cuthill_mckee = True):
    """
    Calculate Cramers V statistic with bias correction for all
    combinations of columns in pandas DataFrame df
    
    Parameters
    --------------------------
    df : pandas DataFrame - all columns must be categorical
    
    reorder_cuthill_mckee : boolean - whether to reorder to the columns
        based on the reverse Cuthill McKee algorithm applied to the
        matrix of Cramers V statistics
        
    Returns
    ---------------------------
    Z : numpy array with Cramers V statistics
    """
    cols = df.columns.tolist()
    Z = np.zeros((len(cols),len(cols)))
    
    for i,j in combinations_with_replacement(range(len(cols)),2):
        z = cramers_corrected_stat(
                pd.crosstab(df[cols[i]],df[cols[j]]).values
            )
        Z[i,j] = Z[j,i] = z
        
    if reorder_cuthill_mckee is True:
        perm = reverse_cuthill_mckee(
            csr_matrix(Z),
            symmetric_mode = True
        )
        cols = [cols[i] for i in perm]
        gx,gy = np.meshgrid(perm,perm)
        Z = (csr_matrix(Z)[gx,gy]).toarray()
        
    Z = pd.DataFrame(
            Z,
            index = cols,
            columns = cols
        )
    
    return Z
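
A minimal usage sketch, assuming all columns of the DataFrame are categorical and that cramers_corrected_stat is available alongside this function; the toy data is only an illustration:

import pandas as pd

df = pd.DataFrame({
    "color": ["red", "blue", "red", "green", "blue", "red"],
    "size":  ["S", "M", "S", "L", "M", "S"],
    "shape": ["circle", "square", "circle", "square", "square", "circle"],
})
V = cramers_corrected_matrix(df, reorder_cuthill_mckee=True)
print(V.round(2))  # symmetric DataFrame of bias-corrected Cramér's V, columns in RCM order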
Example #22
File: nodes.py Project: cossatot/RBF
def _neighbor_argsort(nodes, m=None, vert=None, smp=None):
    '''
  Returns a permutation array that sorts `nodes` so that each node and
  its `m` nearest neighbors are close together in memory. This is done
  through the use of a KD Tree and the Reverse Cuthill-McKee
  algorithm.

  Returns
  -------
  (N,) int array
    Sorting indices.

  Examples
  --------
  >>> nodes = np.array([[0.0, 1.0],
                        [2.0, 1.0],
                        [1.0, 1.0]])
  >>> idx = neighbor_argsort(nodes, 2)
  >>> nodes[idx]
  array([[ 2.,  1.],
         [ 1.,  1.],
         [ 0.,  1.]])

  '''
    if m is None:
        # this should be roughly equal to the stencil size for the RBF-FD
        # problem
        if nodes.shape[1] == 2:
            m = 30
        elif nodes.shape[1] == 3:
            m = 50

    m = min(m, nodes.shape[0])
    # find the indices of the nearest n nodes for each node
    idx, dist = _neighbors(nodes, m, vert=vert, smp=smp)
    # efficiently form adjacency matrix
    col = idx.ravel()
    row = np.repeat(np.arange(nodes.shape[0]), m)
    data = np.ones(nodes.shape[0] * m, dtype=bool)
    mat = csc_matrix((data, (row, col)), dtype=bool)
    permutation = reverse_cuthill_mckee(mat)
    return permutation
Example #23
	def reduzir_medirTempo(self, matriz_nomeArquivo, simetrica=True):

		# measures the execution time to reduce the bandwidth by reordering the matrix

		matriz = self.lerMatriz(matriz_nomeArquivo)

		inicio = time.time()

		# according to the SciPy RCM documentation, this transformation is needed for non-symmetric matrices
		if not simetrica:
			matriz = matriz + matriz.T

		# permutation vector produced by the Reverse Cuthill-McKee algorithm, used to reorder the matrix
		perm_array = reverse_cuthill_mckee(matriz, symmetric_mode=True)

		# reorder the matrix
		matriz = self.reordenarMatriz(matriz,perm_array)

		fim = time.time()

		print("Tempo de execução: ",fim - inicio)
Example #24
    def reorder_matrix(self, matrix, index_order):
        """Return dataframe with matrix row/columns in index_order.

        Parameters
        ----------
        matrix : a SciPy COO sparse matrix
            input sparse matrix
        index_order : list of ensembles or None
            order to list ensembles. If None, defaults to reverse
            Cuthill-McKee order.

        Returns
        -------
        pandas.DataFrame
            dataframe with rows/columns ordered as desired
        """
        #""" matrix must be a coo_matrix (I think): do other have same `data`
        #attrib?"""
        if index_order is None:
            # reorder based on RCM from scipy.sparse.csgraph
            rcm_perm = reverse_cuthill_mckee(matrix.tocsr())
            rev_perm_dict = {k: rcm_perm.tolist().index(k) for k in rcm_perm}
            perm_i = [rev_perm_dict[ii] for ii in matrix.row]
            perm_j = [rev_perm_dict[jj] for jj in matrix.col]

            new_matrix = scipy.sparse.coo_matrix(
                (matrix.data, (perm_i, perm_j)),
                shape=(self.n_ensembles, self.n_ensembles))
            reordered_labels = [self.number_to_string[k] for k in rcm_perm]
        else:
            reordered_labels = [
                self.number_to_string[k] for k in self.number_to_string.keys()
            ]
            new_matrix = matrix

        reordered = pd.DataFrame(new_matrix.todense())
        reordered.index = reordered_labels
        reordered.columns = reordered_labels
        return reordered
Example #25
    def reorder_matrix(self, matrix, index_order):
        """Return dataframe with matrix row/columns in index_order.
        
        Parameters
        ----------
        matrix : a SciPy COO sparse matrix
            input sparse matrix
        index_order : list of ensembles or None
            order to list ensembles. If None, defaults to reverse
            Cuthill-McKee order.

        Returns
        -------
        pandas.DataFrame
            dataframe with rows/columns ordered as desired
        """
        #""" matrix must be a coo_matrix (I think): do other have same `data`
        #attrib?"""
        if index_order is None:
            # reorder based on RCM from scipy.sparse.csgraph
            rcm_perm = reverse_cuthill_mckee(matrix.tocsr())
            rev_perm_dict = {k : rcm_perm.tolist().index(k) for k in rcm_perm}
            perm_i = [rev_perm_dict[ii] for ii in matrix.row]
            perm_j = [rev_perm_dict[jj] for jj in matrix.col]

            new_matrix = scipy.sparse.coo_matrix(
                (matrix.data, (perm_i, perm_j)), 
                shape=(self.n_ensembles, self.n_ensembles)
            )
            reordered_labels = [self.number_to_string[k] for k in rcm_perm]
        else:
            reordered_labels = [self.number_to_string[k] 
                                for k in self.number_to_string.keys()]
            new_matrix = matrix

        reordered = pd.DataFrame(new_matrix.todense())
        reordered.index = reordered_labels
        reordered.columns = reordered_labels
        return reordered
Example #26
    def ordered_graph(self, filename=None, dpi=600, width=None, height=None):
        def get_distances(xs, ys):
            z = np.abs(xs - ys) / 2
            return np.sqrt(2 * z**2) / MAX_DIST

        def get_colors(distances):
            cmap = plt.get_cmap("Dark2")
            return cmap(distances)

        def unroll(data):
            return [list(row) for row in data]

        nm = reverse_cuthill_mckee(self.matrix)
        ro = self.matrix[nm, :][:, nm]
        as_coo = ro.tocoo()
        y, x = as_coo.shape
        MAX_DIST = np.sqrt(2 * (x / 2.0)**2)

        colors = unroll(get_colors(get_distances(as_coo.col, as_coo.row)))

        plt.figure(figsize=(width or x / 1000, height or y / 1000))
        ax = plt.axes([0, 0, 1, 1])
        plt.scatter(
            list(as_coo.shape[1] - as_coo.col),
            list(as_coo.row),
            s=10,
            c=colors,
            marker=".",
            edgecolors="None",
        )
        ax.xaxis.set_ticks_position("none")
        ax.yaxis.set_ticks_position("none")
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])
        ax.set_ylim((0, self.matrix.shape[0]))
        ax.set_xlim((0, self.matrix.shape[1]))
        plt.box(False)
        if filename:
            plt.savefig(filename, dpi=dpi)
Example #27
    def GetCuthillMcKeePermutation(self,A):
        """Applies Cuthill-Mckee permutation to reduce the sparse matrix bandwidth

            input:
                A:                    [csc_matrix or csr_matrix]

            returns:
                perm:                 [1D array] of permutation such that A[perm,:][:,perm]
                                      has its non-zero elements closer to the diagonal
        """

        if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
            raise TypeError("Matrix must be in CSC or CSR sparse format "
                "for Cuthill-McKee permutation")

        # reverse_cuthill_mckee is available in scipy.sparse.csgraph from SciPy 0.15 onwards
        major, minor = (int(v) for v in sp.__version__.split('.')[:2])
        if (major, minor) >= (0, 15):
            from scipy.sparse.csgraph import reverse_cuthill_mckee
            perm = reverse_cuthill_mckee(A)
        else:
            from Florence.Tensor import symrcm
            perm = symrcm(A)

        return perm
Example #28
vtkexport("Poisson_fe_H20_mesh", fes, geom)
print('Mesh generation', time.time() - start)
bfes = mesh_boundary(fes)
cn = connected_nodes(bfes)

temp = NodalField(nfens=fens.count(), dim=1)
for j in cn:
    temp.set_ebc([j],
                 val=boundaryf(fens.xyz[j, 0], fens.xyz[j, 1], fens.xyz[j, 2]))
temp.apply_ebc()
femm = FEMMHeatDiff(material=m,
                    fes=fes,
                    integration_rule=GaussRule(dim=3, order=3))
S = femm.connection_matrix(geom)
perm = reverse_cuthill_mckee(S, symmetric_mode=True)
temp.numberdofs(node_perm=perm)
# temp.numberdofs()
start = time.time()
fi = ForceIntensity(magn=lambda x, J: Q)
F = femm.distrib_loads(geom, temp, fi, 3)
print('Heat generation load', time.time() - start)
start = time.time()
F += femm.nz_ebc_loads_conductivity(geom, temp)
print('NZ EBC load', time.time() - start)
start = time.time()
K = femm.conductivity(geom, temp)
print('Matrix assembly', time.time() - start)
start = time.time()
# lu = splu(K)
# T = lu.solve(F)
Example #29
#
# Note that `i_start` is the index of the adjacency matrix.
# Thus, even though we have indexed our nodes starting at 1, in the adjacency matrix the indices start from 0 (as they always do in Python).

# + pycharm={"name": "#%%\n"}
import scipy.sparse.csgraph as csgraph

csgraph.breadth_first_order(gm.adjacency_matrix(G),
                            i_start=0,
                            return_predecessors=False)

# + [markdown] pycharm={"name": "#%% md\n"}
# ### 4. Reverse Cuthill McKee

# + pycharm={"name": "#%%\n"}
csgraph.reverse_cuthill_mckee(gm.adjacency_matrix(G))

# + [markdown] pycharm={"name": "#%% md\n"}
# # Grids
#
# ## 450. Triangular grid

# + pycharm={"name": "#%%\n"}
from scipy.spatial import Delaunay
import matplotlib.pyplot as plt

points = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
triangles = np.array([[0, 1, 2], [1, 2, 3]])
plt.triplot(points[:, 0], points[:, 1], triangles)

# + pycharm={"name": "#%%\n"}
Example #30
    def compute_band_matrix(self,
                            dim_finale=1000,
                            QID_columns=None,
                            lista_sensibili_column=None,
                            lista_sensibili_r=None,
                            density=0.03,
                            plot=True,
                            withRCM=True):
        """
            Metodo per la lettura da file del dataframe e
            per la creazione di una matrice bandizzata
            tramite algoritmo reverse_cuthill_mckee
        """

        density = math.sqrt(density)

        # read the file and size the matrix
        original_dataset = self.dataframe
        if original_dataset is not None and lista_sensibili_r is not None and lista_sensibili_column is not None and QID_columns is not None:
            if len(original_dataset.columns
                   ) < dim_finale + len(lista_sensibili_column):
                choose = input(
                    "There are not enough columns, do you want to change the number of columns from %d to %d? [y/n] "
                    % (dim_finale, len(original_dataset.columns) -
                       len(lista_sensibili_column)))
                if choose == "y":
                    dim_finale = len(
                        original_dataset.columns) - len(lista_sensibili_column)
                else:
                    return

            if len(original_dataset) < dim_finale:
                choose = input(
                    "There are not enough rows, do you want to change the number of rows from %d to %d? [y/N] "
                    % (dim_finale, len(original_dataset)))
                if choose == "y":
                    dim_finale = len(original_dataset)
                else:
                    return

            self.size_after_RCM = dim_finale
            # random permutation of the values to insert into the band matrix
            lista_sensibili_row = list()
            indice_righe = 0
            while len(lista_sensibili_row) < dim_finale and len(
                    lista_sensibili_row) < len(lista_sensibili_r):
                lista_sensibili_row.append(lista_sensibili_r[indice_righe])
                indice_righe += 1
            lista_QID_column = list()
            indice_righe = 0
            while len(lista_QID_column) < dim_finale and len(
                    lista_QID_column) < len(QID_columns):
                lista_QID_column.append(QID_columns[indice_righe])
                indice_righe += 1

            total_col = list()
            for pip in lista_QID_column:
                total_col.append(pip)
            for pip in lista_sensibili_column:
                total_col.append(pip)
            items_final = dict(zip(total_col, total_col))

            df_sensitive = original_dataset.iloc[lista_sensibili_row][
                lista_sensibili_column]
            df_square = original_dataset.iloc[lista_sensibili_row][
                lista_QID_column]

            sum_for_columns = df_square[lista_QID_column].sum()
            sum_total = 0
            for sum_column in sum_for_columns:
                sum_total += sum_column
            density_before = sum_total / (dim_finale * dim_finale)
            """
            df1 = df_square.sample(frac=density, axis=0)
            df2 = df1.sample(frac=density, axis=1)
            df3 = pd.DataFrame(1, index=df2.index, columns=df2.columns)
            df_square.update(df3)
            """
            num_rows = int(density * dim_finale)
            random_column = np.random.permutation(QID_columns)[:num_rows]
            random_row = np.random.permutation(lista_sensibili_row)[:num_rows]
            df_square.update(
                pd.DataFrame(1, index=random_row, columns=random_column))

            sum_for_columns = df_square[lista_QID_column].sum()
            sum_total = 0
            for sum_column in sum_for_columns:
                sum_total += sum_column
            self.density = sum_total / (dim_finale * dim_finale)

            print("Density before : %s -- %s -- %s" %
                  (density_before, self.density, density * density))

            if withRCM:
                # build the band matrix by reordering the initial matrix
                sparse = csr_matrix(df_square)
                order = reverse_cuthill_mckee(sparse)

                column_reordered = [df_square.columns[i] for i in order]
                df_square_band = df_square.iloc[order][column_reordered]
                df_sensitive_band = df_sensitive.iloc[order]

                final_df = pd.concat([df_square_band, df_sensitive_band],
                                     axis=1,
                                     join='inner')
                # compute the bandwidth before and after processing
                [i, j] = np.where(df_square == 1)
                bw = max(i - j) + max(j - i) + 1
                self.original_band = bw

                [i, j] = np.where(df_square_band == 1)
                bw1 = max(i - j) + max(j - i) + 1
                self.band_after_rcm = bw1

                # parameters for plotting the matrices
                if plot:
                    f, (ax1, ax2) = pltt.subplots(1, 2, sharey=True)
                    ax1.spy(df_square, marker='.', markersize='3')
                    ax2.spy(df_square_band, marker='.', markersize='3')
                    pltt.show()
                    print("Bandwidth before RCM: ", bw)
                    print("Bandwidth after RCM", bw1)

                self.dataframe_bandizzato = final_df
                self.items_final = items_final
                self.lista_sensibili = lista_sensibili_column

            else:
                final_df = pd.concat([df_square, df_sensitive],
                                     axis=1,
                                     join='inner')

                [i, j] = np.where(self.df_square_complete == 1)
                bw = max(i - j) + max(j - i) + 1
                self.original_band = bw
                self.band_after_rcm = bw
                if plot:
                    print("Bandwidth before RCM: ", bw)

                self.dataframe_bandizzato = final_df
                self.items_final = items_final
                self.lista_sensibili = lista_sensibili_column
        else:
            print("Error 404: Dataset not found or file not found.")
Example #31
    def _best_subset(self, num_qubits):
        """Computes the qubit mapping with the best connectivity.

        Args:
            num_qubits (int): Number of subset qubits to consider.

        Returns:
            ndarray: Array of qubits to use for best connectivity mapping.
        """
        if num_qubits == 1:
            return np.array([0])
        if num_qubits == 0:
            return []

        device_qubits = self.coupling_map.size()

        cmap = np.asarray(self.coupling_map.get_edges())
        data = np.ones_like(cmap[:, 0])
        sp_cmap = sp.coo_matrix(
            (data, (cmap[:, 0], cmap[:, 1])), shape=(device_qubits, device_qubits)
        ).tocsr()
        best = 0
        best_map = None
        best_error = np.inf
        best_sub = None
        # do bfs with each node as starting point
        for k in range(sp_cmap.shape[0]):
            bfs = cs.breadth_first_order(
                sp_cmap, i_start=k, directed=False, return_predecessors=False
            )

            connection_count = 0
            sub_graph = []
            for i in range(num_qubits):
                node_idx = bfs[i]
                for j in range(sp_cmap.indptr[node_idx], sp_cmap.indptr[node_idx + 1]):
                    node = sp_cmap.indices[j]
                    for counter in range(num_qubits):
                        if node == bfs[counter]:
                            connection_count += 1
                            sub_graph.append([node_idx, node])
                            break

            if self.backend_prop:
                curr_error = 0
                # compute meas error for subset
                avg_meas_err = np.mean(self.meas_arr)
                meas_diff = np.mean(self.meas_arr[bfs[0:num_qubits]]) - avg_meas_err
                if meas_diff > 0:
                    curr_error += self.num_meas * meas_diff

                cx_err = np.mean([self.cx_mat[edge[0], edge[1]] for edge in sub_graph])
                if self.coupling_map.is_symmetric:
                    cx_err /= 2
                curr_error += self.num_cx * cx_err
                if connection_count >= best and curr_error < best_error:
                    best = connection_count
                    best_error = curr_error
                    best_map = bfs[0:num_qubits]
                    best_sub = sub_graph

            else:
                if connection_count > best:
                    best = connection_count
                    best_map = bfs[0:num_qubits]
                    best_sub = sub_graph

        # Return a best mapping that has reduced bandwidth
        mapping = {}
        for edge in range(best_map.shape[0]):
            mapping[best_map[edge]] = edge
        new_cmap = [[mapping[c[0]], mapping[c[1]]] for c in best_sub]
        rows = [edge[0] for edge in new_cmap]
        cols = [edge[1] for edge in new_cmap]
        data = [1] * len(rows)
        sp_sub_graph = sp.coo_matrix((data, (rows, cols)), shape=(num_qubits, num_qubits)).tocsr()
        perm = cs.reverse_cuthill_mckee(sp_sub_graph)
        best_map = best_map[perm]
        return best_map
Example #32
def rcm(A: spmatrix, b: ndarray) -> Tuple[spmatrix, ndarray, ndarray]:
    p = spg.reverse_cuthill_mckee(A, symmetric_mode=False)
    return A[p].T[p].T, b[p], p
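
A minimal sketch of using the returned triple: solve the permuted (bandwidth-reduced) system, then undo the permutation to recover the solution of the original system. The matrix and right-hand side below are made up, and spsolve is scipy.sparse.linalg.spsolve:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve

A = csr_matrix(np.array([[4.0, 0.0, 1.0, 0.0],
                         [0.0, 3.0, 0.0, 1.0],
                         [1.0, 0.0, 5.0, 0.0],
                         [0.0, 1.0, 0.0, 6.0]]))
b = np.array([1.0, 2.0, 3.0, 4.0])

Ap, bp, p = rcm(A, b)           # permuted system with reduced bandwidth
xp = spsolve(Ap.tocsr(), bp)    # solve the permuted system
x = np.empty_like(xp)
x[p] = xp                       # undo the permutation
print(np.allclose(A @ x, b))    # True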
Example #33
def delta_buildmatrices():
    cols = []
    kappa = 5
    its = 24
    ind = []
    # run simulations
    print("===================")
    print("RUNNING SIMULATIONS")
    print("===================")
    if count:
        nsl = 0
        sl = 0
    for j in tqdm(indices):
        edgelist = list(graphs[int(j)].edges)
        colorlist = coloring[j][0]
        net = ColorNNetwork(colorlist.tolist(), edgelist)
        coldyn = simulate_Kuramoto(net,
                                   K=2,
                                   timesec=60,
                                   verbose=0,
                                   intrinsic=0)[0]
        s = dataout[int(j)]
        if count:
            if s:
                if sl >= 100:
                    pass
                else:
                    cols.append(coldyn)
                    ind.append(int(j))
                    sl += 1
            else:
                if nsl >= 100:
                    pass
                else:
                    cols.append(coldyn)
                    ind.append(int(j))
                    nsl += 1
            if sl >= 100 and nsl >= 100:
                break
        print(len(cols), len(ind))

    adjmatsnsl = []
    adjmatssl = []
    n = 30
    dataynsl = []
    dataysl = []
    # create adjacency matrices
    print("==================")
    print("ADJACENCY MATRICES")
    print("==================")
    if count:
        nsl = 0
        sl = 0
    for i, j in enumerate(tqdm(ind)):

        # index from the graphs
        graph = graphs[int(j)]

        # compute rcm
        rcm = np.asarray(
            reverse_cuthill_mckee(csr_matrix(nx.adjacency_matrix(graph).todense())))

        adjdyn = []
        # assigning colors
        for col in cols[i]:
            for x in range(n):
                for y in range(x + 1):
                    if graph.has_edge(x, y):
                        widthcol = width([col[x], col[y]], kappa)
                        graph.add_weighted_edges_from([(x, y, widthcol)])
            frame = nx.adjacency_matrix(graph).todense()[:,rcm][rcm,:] + \
                                np.diag(np.asarray(col)[rcm])
            adjdyn.append(frame)

        # pad iterations to uniform length
        frameseq = np.stack(np.asarray(adjdyn + [adjdyn[-1]] *
                                       ((its + 1) - len(cols[i]))),
                            axis=0)
        if count:
            s = dataout[int(j)]
            if s:
                sl += 1
            else:
                nsl += 1

            print("SYNC:", len(dataysl), "NONSYNC:", len(dataynsl))
            if sl > 100:
                pass
            else:
                adjmatssl.append(frameseq)
                dataysl.append(s)
            if nsl > 100:
                pass
            else:
                adjmatsnsl.append(frameseq)
                dataynsl.append(s)

            if sl > 100 and nsl > 100:
                break
    print(len(adjmatssl), len(dataynsl))
    print(len(adjmatsnsl), len(dataysl))

    #datain = np.stack(adjmats, axis=0)

    # save results
    with open(path + 'delta.npy', 'wb') as f:
        np.save(f, adjmatssl)
        np.save(f, dataysl)
        np.save(f, adjmatsnsl)
        np.save(f, dataynsl)
Example #34
File: reorder.py Project: ckanu13k/graphs
def cuthill_mckee(G):
    sG = G.matrix(csr=True)
    order = ssc.reverse_cuthill_mckee(sG, symmetric_mode=True)
    return permute_graph(G, order)