Example #1
def ExactPrecond(P, Mass, L, F, Fspace, Backend="PETSc"):
    Schur = SchurPCD(Mass, L, F, "uBLAS")
    P = P.sparray()
    P.eliminate_zeros()

    P = P.tocsr()

    # P = IO.matToSparse(P)
    plt.spy(Schur)
    plt.savefig("plt2")
    P[Fspace[0].dim():Fspace[0].dim() + Fspace[1].dim(),
      Fspace[0].dim():Fspace[0].dim() +
      Fspace[1].dim()] = -Schur  #-1e-10*sp.identity(Schur.shape[0])
    u, s, v = svd(P.todense())
    print "#####################", np.sum(s > 1e-10)
    P.eliminate_zeros()
    # P[-2,-2] += 1
    u, s, v = svd(P.todense())
    print "#####################", np.sum(s > 1e-10)

    if Backend == "PETSc":
        return PETSc.Mat().createAIJ(size=P.shape,
                                     csr=(P.indptr, P.indices, P.data))
    else:
        return P  #.transpose()
Example #2
def plot_matrices(list_of_matrices):
    """Plots the given list of sparse matrices using plt.spy()"""
    nplots = len(list_of_matrices)
    for i, m in enumerate(list_of_matrices):
        pl.subplot(nplots, 1, i + 1)
        pl.spy(m)
    pl.savefig('sparsity.png')
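A minimal usage sketch for plot_matrices (not from the original project): it assumes the function above is in scope with pl bound to matplotlib.pylab, and it builds a few random SciPy matrices purely for illustration.

import matplotlib.pylab as pl
import scipy.sparse as sps

# three random sparse test matrices, just to exercise plot_matrices()
mats = [sps.random(50, 50, density=0.05, format='csr') for _ in range(3)]
plot_matrices(mats)  # writes 'sparsity.png' with one spy() panel per matrix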
Example #3
    def data(self):

        # SAVE PLOT

        plt.figure(figsize=(11, 10))
        plt.spy(self.G.adj, markersize=2)
        plt.suptitle("Random Clustered Graph - %s" % self.G.name)

        plt.text(
            -40, -5, "M: %d\nN: %d\nK: %d\nP(in): %.2f\nP(out): %.2f" %
            (self.G.M, self.G.N, self.G.K, self.G.PIN, self.G.POUT))

        plt.savefig(self.path + ("%s-rcg.png" % self.G.name))
        #plt.savefig(self.path + ("rcg-%d-%d-%.2f-%.2f.pdf" % (self.M, self.N, self.PIN, self.POUT)))
        plt.close()

        # SAVE GRAPH
        with open(self.path + ("%s-linked.csv" % self.G.name),
                  'w') as f:  # Just use 'w' mode in 3.x
            w = csv.DictWriter(f, self.G.graph.keys())
            w.writeheader()
            w.writerow(self.G.graph)

        # SAVE ADJACENCY MATRIX
        np.savetxt(self.path + ("%s-adj.csv" % self.G.name),
                   self.G.adj,
                   delimiter=",",
                   fmt="%d")
Example #4
def look_at_a_form(form, fname="foo.png", xlim=None, ylim=None):
    from matplotlib import pylab as plt
    Ksp = assemble_as_scipy(form)
    plt.spy(Ksp)
    if xlim is not None: plt.xlim(*xlim)
    if ylim is not None: plt.ylim(*ylim)
    plt.savefig(fname, dpi=150)
Example #5
 def SpyMatrix(self):
     try:
         import matplotlib.pylab as pl
         pl.spy(self.A)
         pl.show()
     except:
         raise Exception(
             "error in function Spy. Probably matplotlib not installed")
Example #6
def visualize_matrices(list_matrices, names):
    assert len(list_matrices) == len(
        names), "Names and number of matrices should be the same"
    for idx, mat in enumerate(list_matrices):
        plt.subplot(len(names), 1, idx + 1)
        plt.title(names[idx])
        plt.spy(mat, aspect="auto", markersize=0.5)
    plt.show()
Example #7
 def SpyMatrix(self):
     try:
         import matplotlib.pylab as pl
         pl.spy(self.A)
         pl.show()
     except:
         raise Exception(
             "error in function Spy. Probably matplotlib not installed")
Example #8
def con_matrix(epochs: mne.Epochs,
               freqs_mean: list,
               draw: bool = False) -> tuple:
    """
    Computes a priori channel connectivity across space and frequencies.

    Arguments:
        epochs: one participant Epochs object; contains channel information.
        freqs_mean: list of frequencies in frequency-band-of-interest used
          by MNE for power or coherence spectral density calculation.
        draw: option to plot the connectivity matrices, boolean.

    Returns:
        ch_con, ch_con_freq:

        - ch_con: connectivity matrix between channels along space based on
          their position, scipy.sparse.csr_matrix of shape
          (n_channels, n_channels).

        - ch_con_freq: connectivity matrix between channels along space and
          frequencies, scipy.sparse.csr_matrix of shape
          (n_channels*len(freqs_mean), n_channels*len(freqs_mean)).
    """

    # creating channel-to-channel connectivity matrix in space
    ch_con, ch_names_con = find_ch_connectivity(epochs.info, ch_type='eeg')

    ch_con_arr = ch_con.toarray()

    # duplicating the array 'freqs_mean' or 'freqs' times (PSD or CSD)
    # to take channel connectivity across neighboring frequencies into
    # account
    l_freq = len(freqs_mean)
    init = np.zeros((l_freq * len(ch_names_con), l_freq * len(ch_names_con)))
    for i in range(0, l_freq * len(ch_names_con)):
        for p in range(0, l_freq * len(ch_names_con)):
            if (p // len(ch_names_con) == i // len(ch_names_con)) or (
                    p // len(ch_names_con) == i // len(ch_names_con) +
                    1) or (p // len(ch_names_con)
                           == i // len(ch_names_con) - 1):
                init[i][p] = 1

    ch_con_mult = np.tile(ch_con_arr, (l_freq, l_freq))
    ch_con_freq = np.multiply(init, ch_con_mult)

    if draw:
        plt.figure()
        # visualizing the matrix and transforming it into array
        plt.subplot(1, 2, 1)
        plt.spy(ch_con)
        plt.title("Connectivity matrix")
        plt.subplot(1, 2, 2)
        plt.spy(ch_con_freq)
        plt.title("Meta-connectivity matrix")

    con_matrixTuple = namedtuple('con_matrix', ['ch_con', 'ch_con_freq'])

    return con_matrixTuple(ch_con=ch_con, ch_con_freq=ch_con_freq)
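The frequency-duplication step in con_matrix above (the nested loops building init, followed by np.tile) is easier to see on a toy example. A self-contained sketch of the same idea, with made-up numbers rather than MNE data:

import numpy as np

n_ch, l_freq = 3, 3
ch_con_arr = np.array([[1, 1, 0],
                       [1, 1, 1],
                       [0, 1, 1]])  # toy channel-to-channel connectivity

# mask linking (channel, freq) nodes only when frequencies are equal or adjacent
size = n_ch * l_freq
init = np.zeros((size, size))
for i in range(size):
    for p in range(size):
        if abs(p // n_ch - i // n_ch) <= 1:
            init[i, p] = 1

ch_con_mult = np.tile(ch_con_arr, (l_freq, l_freq))  # repeat the spatial pattern
ch_con_freq = np.multiply(init, ch_con_mult)         # zero out non-adjacent frequency blocks
print(ch_con_freq.astype(int))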
Example #9
def HelmholtzNonPeriodic(M, quad, ST, num_processes):

    N = array([2**M, 2**(M)]) 
    L = array([2, 2*pi])

    kx = arange(N[0]).astype(float)
    ky = fftfreq(N[1], 1./N[1])

    Lp = array([2, 2*pi])/L
    K  = array(meshgrid(kx, ky, indexing='ij'), dtype=float)
    K[0] *= Lp[0]; K[1] *= Lp[1] 

    points, weights = ST.points_and_weights(N[0])
    x1 = arange(N[1], dtype=float)*L[1]/N[1]
    X = array(meshgrid(points, x1, indexing='ij'), dtype=float)

    Ix = identity(N[0])
    Iy = identity(N[1])
    Dx = chebDiff2(N[0],quad)
    Dy = diag(-K[1,0,:]**2)
    
    U     = empty((N[0], N[1]))
    U_hat = empty((N[0], N[1]), dtype="complex")
    P     = empty((N[0], N[1]))
    P_hat = empty((N[0], N[1]), dtype="complex")

    alpha = 2.e4
    exact = sin(pi*X[0])*sin(X[1])
    P[:] = (alpha-1.0-pi**2)*exact

    P_hat = fct(P, P_hat, ST, num_processes, comm)
    
    U_hat = U_hat.reshape((N[0]*N[1],1))
    P_hat = P_hat.reshape((N[0]*N[1],1))

    lhs = alpha*kron(Ix,Iy) + kron(Dx,Iy) + kron(Ix,Dy)
    rhs = dot(kron(Ix,Iy),P_hat)
    #sparse.kron(Dy,Ix) + sparse.kron(Iy,Dx)
    testz = kron(kron(Dy,Ix),Ix) + kron(kron(Iy,Dy),Ix) + kron(Ix,kron(Iy,Dx)) 
    pl.spy(testz,precision=1.0e-16, markersize=3)
    pl.show()     
    sys.exit()
    
    U_hat = linalg.solve(lhs,rhs)

    U_hat = U_hat.reshape((N[0],N[1]))
    U = ifct(U_hat, U, ST, num_processes, comm)

    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    from matplotlib import colors, ticker, cm
    cs = plt.contourf(X[0], X[1], U, 50, cmap=cm.coolwarm)
    cbar = plt.colorbar()
    plt.show()
        
    print "Error: ", linalg.norm(U-exact,inf)
    assert allclose(U,exact)
Example #10
def spy(matrix, tol=0.1, color=True):
    fig = plt.figure()
    fig.clf()

    matrix = sps.csr_matrix(matrix)

    if color:
        matrix_dense = np.abs(matrix.todense())
        plt.imshow(matrix_dense, interpolation='none', cmap='binary')
        plt.colorbar()
    else:
        plt.spy(matrix, precision=tol)

    plt.show()
Example #11
def spy(matrix, tol=0.1, color=True, title=''):
    fig = plt.figure()
    fig.clf()

    matrix = sps.csr_matrix(matrix)

    elements = matrix.shape[0]
    markersize = (1./float(elements)) * 500.

    if color:
        matrix_dense = np.abs(matrix.todense())
        plt.imshow(matrix_dense, interpolation='none', cmap='binary')
        plt.colorbar()
    else:
        plt.spy(matrix, precision=tol, markersize=markersize)

    plt.title(title)
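A short usage sketch for the spy helper above (illustrative only). It assumes the helper's module-level imports are in place, in particular numpy as np and matplotlib.pyplot as plt:

import scipy.sparse as sps
import matplotlib.pyplot as plt

A = sps.random(40, 40, density=0.1, format='csr')
spy(A, color=True, title='|A| as a heat map')            # imshow of the magnitudes
spy(A, color=False, tol=0.05, title='sparsity pattern')  # plt.spy with a threshold
plt.show()  # this version of the helper sets up figures but never shows them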
Example #12
    def preview_csr_matrix(self, csr):
        '''
        Displays a portion of the CSR matrix.

        Parameters:
            csr (scipy.sparse.csr_matrix): the matrix

        Returns:
            None
        '''
        fig = plt.figure(figsize=(50, 50))
        tick_range = np.arange(0, 80000, 100)
        plt.yticks(tick_range, list(tick_range))

        plt.xlabel("JobID")
        plt.ylabel("UserID")
        plt.spy(csr, markersize=1, origin="lower")
Example #13
def draw_sparsity_pattern(adjdict: Dict[int, List[int]],
                          node_order: List[int] = None,
                          save_fig=False,
                          prefix="",
                          **kwargs):
    g = nx.DiGraph()
    for u, vs in adjdict.items():
        g.add_edges_from([(u, v) for v in vs])
    csr_matrix = nx.to_scipy_sparse_matrix(g, node_order)
    plt.spy(csr_matrix, **kwargs)
    if not save_fig:
        plt.show()
    else:
        path = "./tex/figs/{}_sparsity_pattern.png".format(prefix)
        plt.savefig(path, bbox_inches='tight')
        print("Saved {}".format(path))
    plt.clf()
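A hedged usage sketch for draw_sparsity_pattern; the adjacency dict is made up, and nx.to_scipy_sparse_matrix requires networkx < 3.0, where that function still exists:

toy_adj = {0: [1, 2], 1: [2], 2: [0], 3: [0, 3]}
draw_sparsity_pattern(toy_adj, node_order=[0, 1, 2, 3], markersize=10)  # extra kwargs go to plt.spy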
Example #14
def Poisson1D(N, f):
    
    f_hat = zeros(N)
    v_hat = zeros(N)
    v = zeros(N)
    
    Bmat = B_matrix(N)
    Ix, Imx = QI2(N)
    Dx, D2x, D4 = QIM(N, quad)
    pl.spy(D4,precision=0.0000000001, markersize=3)
    pl.show()      
    
    f_hat = SC.fct(f,f_hat)

    lhs = dot(Ix, Bmat) 
    rhs = dot(D2x,f_hat)

    v_hat[:-2] = linalg.solve(lhs[2:,:-2],rhs[2:])
    v = ST.ifst(v_hat, v)
    return v
Example #15
def plot_intersection_matrix(mylabels):
    '''
    Plots matrix showing intersections/ overlaps between labels
    in the same hemisphere, all the labels are unique
    this means that no labels reduction is possible.
    '''
    import matplotlib.pyplot as pl
    import itertools

    length = len(mylabels)
    intersection_matrix = np.zeros((length, length))
    for i, j in itertools.product(range(length), range(length)):
        if mylabels[i].hemi == mylabels[j].hemi:
            intersection_matrix[i][j] = np.intersect1d(mylabels[i].vertices,
                                                       mylabels[j].vertices).size
        else:
            intersection_matrix[i][j] = 0
    pl.spy(intersection_matrix)
    pl.show()
    return intersection_matrix
Example #16
def plot_intersection_matrix(mylabels):
    '''
    Plots matrix showing intersections/ overlaps between labels
    in the same hemisphere, all the labels are unique
    this means that no labels reduction is possible.
    '''
    import matplotlib.pyplot as pl
    import itertools

    length = len(mylabels)
    intersection_matrix = np.zeros((length, length))
    for i, j in itertools.product(range(length), range(length)):
        if mylabels[i].hemi == mylabels[j].hemi:
            intersection_matrix[i][j] = np.intersect1d(mylabels[i].vertices,
                                                       mylabels[j].vertices).size
        else:
            intersection_matrix[i][j] = 0
    pl.spy(intersection_matrix)
    pl.show()
    return intersection_matrix
Example #17
File: Solver.py Project: wathen/PhD
def ExactPrecond(P,Mass,L,F,Fspace,Backend="PETSc"):
    Schur = SchurPCD(Mass,L,F, "uBLAS")
    P = P.sparray()
    P.eliminate_zeros()

    P = P.tocsr()

            # P = IO.matToSparse(P)
    plt.spy(Schur)
    plt.savefig("plt2")
    P[Fspace[0].dim():Fspace[0].dim()+Fspace[1].dim(),Fspace[0].dim():Fspace[0].dim()+Fspace[1].dim()] = -Schur #-1e-10*sp.identity(Schur.shape[0])
    u, s, v = svd(P.todense())
    print "#####################",np.sum(s > 1e-10)
    P.eliminate_zeros()
    # P[-2,-2] += 1
    u, s, v = svd(P.todense())
    print "#####################",np.sum(s > 1e-10)

    if Backend == "PETSc":
        return PETSc.Mat().createAIJ(size=P.shape,csr=(P.indptr, P.indices, P.data))
    else:
        return P #.transpose()
Example #18
def grind_matrix(file, args):
    name = os.path.basename(file).replace('.mtx', '')

    # read in matrix data
    if args.format == 'matlabtl':
        matrices, realms, imagms = matrixio.read_matlab_matrix_timeline(
            file,
            args.timestep + 1
        )
    elif args.format == 'mm':
        realms = [matrixio.read_matrix_market(file)]
    else:
        print 'Unsupported format'
        return

    # perform requested analysis
    if args.analysis == 'sparsity':
        step = args.timestep if args.format == 'matlabtl' else 0
        pl.spy(realms[step])
        pl.show()
    elif args.analysis == 'range':
        minCell, maxCell, uniqueValues = range_analysis(realms[0])
        print 'Min Value:', minCell
        print 'Max Value:', maxCell
        print 'Unique values / total nonzero values:',
        print uniqueValues, ' / ', realms[0].nnz
        print 'Range:', maxCell - minCell
    elif args.analysis == 'changes':
        if args.format != 'matlabtl':
            print 'Changes analysis only supported in matlabtl format.'
            return
        res = changes_analysis(realms)
        res2 = changes_analysis(imagms)
        nitems = len(res) + len(res2)
        for i, k in enumerate(sorted(res.iterkeys())):
            print i
            pl.subplot(nitems, 1, i + 1)
            pl.spy(res.get(k))
        for i, k in enumerate(sorted(res2.iterkeys())):
            pl.subplot(nitems, 1, len(res) + i + 1)
            pl.spy(res2.get(k))
        pl.show()
    elif args.analysis == 'reordering':
        plot_matrices([realms[0]] + reorder_analysis(realms[0]))
    elif args.analysis == 'storage':
        print 'Running storage format analysis'
        storage_analysis(realms[0])
    elif args.analysis == 'compress_bcsrvi':
        compression_analysis_bcsrvi(realms[0], name)
    elif args.analysis == 'reduce_precision':
        compression_analysis_precision(realms[0], name, args.tolerance)
    elif args.analysis == 'summary':
        summary_analysis(realms[0], name)
    elif args.analysis == 'plot':
        plot_matrices([realms[0]])
    else:
        print 'Unsupported analysis'
        return
Example #19
def parse_fiber_counts():
    yarn_names = json.load(open(rc.YARN_NAMES, 'rb'))['yarns']
    stored_json_projects = json.load(open(rc.STORED_PROJECTS, 'rb'))

    yarn_data_matrix = np.identity(len(yarn_names))
    yarn_name_to_matrix_id_dict = {}

    index = 0
    for yarn_name in yarn_names:
        yarn_name_to_matrix_id_dict[yarn_name[2]] = index
        index += 1

    json.dump(yarn_name_to_matrix_id_dict,open(rc.YARN_NAME_TO_MATRIX_DICT,'wb'))

    for pattern_group in stored_json_projects['projects']:
        key = pattern_group.keys()[0]
        fibers_for_pattern = json.load(open(rc.STORED_PATTERNS + key,'rb'))

        yarns = []
        for project in fibers_for_pattern[key]:
            for yarn in project['yarn_data']:
                if yarn['yarn_id'] is None or yarn in yarns:
                    continue
                else:
                    yarns.append(yarn)

        for i in yarns:
            for j in yarns:
                if i != j:
                    yarn_data_matrix[yarn_name_to_matrix_id_dict[str(i['yarn_id'])]][yarn_name_to_matrix_id_dict[str(j['yarn_id'])]] += 1

        print key

    plt.spy(yarn_data_matrix, precision=0.01, markersize=1)
    plt.show()

    return pickle.dump(yarn_data_matrix, open(rc.YARN_DATA_MATRIX, 'wb'))
Example #20
def main(show_plot=True):
    if show_plot:
        import matplotlib.pylab as plt

    instance = create_problem(0.0, 10.0)
    # Discretize model using Orthogonal Collocation
    discretizer = pyo.TransformationFactory('dae.collocation')
    discretizer.apply_to(instance, nfe=100, ncp=3, scheme='LAGRANGE-RADAU')
    discretizer.reduce_collocation_points(instance,
                                          var=instance.u,
                                          ncp=1,
                                          contset=instance.t)

    # Interface pyomo model with nlp
    nlp = PyomoNLP(instance)
    x = nlp.create_new_vector('primals')
    x.fill(1.0)
    nlp.set_primals(x)

    lam = nlp.create_new_vector('duals')
    lam.fill(1.0)
    nlp.set_duals(lam)

    # Evaluate jacobian
    jac = nlp.evaluate_jacobian()
    if show_plot:
        plt.spy(jac)
        plt.title('Jacobian of the constraints\n')
        plt.show()

    # Evaluate hessian of the lagrangian
    hess_lag = nlp.evaluate_hessian_lag()
    if show_plot:
        plt.spy(hess_lag)
        plt.title('Hessian of the Lagrangian function\n')
        plt.show()

    # Build KKT matrix
    kkt = BlockMatrix(2, 2)
    kkt.set_block(0, 0, hess_lag)
    kkt.set_block(1, 0, jac)
    kkt.set_block(0, 1, jac.transpose())
    if show_plot:
        plt.spy(kkt.tocoo())
        plt.title('KKT system\n')
        plt.show()
Example #21
for i in range(n_scenarios):
    instance = create_basic_dense_qp(G, A, bs[i], c)

    nlp = PyomoNLP(instance)
    models.append(instance)
    scenario_name = "s{}".format(i)
    scenarios[scenario_name] = nlp
    coupling_vars[scenario_name] = [nlp.variable_idx(instance.x[0])]

nlp = TwoStageStochasticNLP(scenarios, coupling_vars)

x = nlp.x_init()
y = nlp.y_init()

jac_c = nlp.jacobian_c(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()

hess_lag = nlp.hessian_lag(x, y)
plt.spy(hess_lag.tocoo())
plt.title('Hessian of the Lagrangian function\n')
plt.show()

kkt = BlockSymMatrix(2)
kkt[0, 0] = hess_lag
kkt[1, 0] = jac_c

plt.spy(kkt.tocoo())
plt.title('KKT system\n')
plt.show()
Example #22
File: mini3d.py Project: wathen/PhD
        X = PP[0:V.dim(),0:V.dim()]
        Xdiag = X.diagonal()
        # PP = assemble(-div(v)*p*dx)
        # bc.apply(PP)
        # PP = PP.sparray()
        # Xdiag = X.sum(1).A
        # print Xdiag



        Bt = PP[0:V.dim(),V.dim():W.dim()]
        d = spdiags(1.0/Xdiag, 0, len(Xdiag), len(Xdiag))
        dBt = (d*Bt).tocsr()
        print Bt.transpose()*dBt.todense()

        plt.spy(dBt)
        plt.show()
        BQB = Bt.transpose()*dBt
        dBt = PETSc.Mat().createAIJ(size=dBt.shape,csr=(dBt.indptr, dBt.indices, dBt.data))
        print dBt.size
        BQB = PETSc.Mat().createAIJ(size=BQB.tocsr().shape,csr=(BQB.tocsr().indptr, BQB.tocsr().indices, BQB.tocsr().data))
        # parameters['linear_algebra_backend'] = 'PETSc'
        kspBQB = NSprecondSetup.LSCKSPlinear(BQB)
    elif Solver == "PCD":
        N = FacetNormal(mesh)
        h = CellSize(mesh)
        h_avg =avg(h)
        alpha = 10.0
        gamma =10.0

        (pQ) = TrialFunction(Q)
Example #23
    """
    nkts = U.size
    nbfuns = nkts - p - 1
    npts = u.size

    dBij = np.zeros((p + 1, npts))
    for j in range(0, npts):
        span = fspan(u[j], p, U)
        dB_i = dbfuns(span, n, u[j], p, U)

        for i in range(0, p + 1):
            dBij[i, j] = dB_i[n, i]

    return dBij


if __name__ == '__main__':
    import matplotlib.pylab as plt
    import scipy.sparse as sps

    n = 0
    u = 5. / 2
    p = 2
    U = np.array([0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5], dtype=np.float64)

    u = np.linspace(0, 5, 20)
    (vals, (rows, cols)), sz = dbfunsop(n, u, p, U)
    plt.spy(sps.csc_matrix((vals, (rows, cols)), shape=sz))
    plt.axis('equal')
    plt.show()
Example #24
def metaconn_matrix(electrodes: list, ch_con: scipy.sparse.csr_matrix, freqs_mean: list) -> tuple:
    """
    Computes a priori connectivity between pairs of sensors for which
    connectivity indices have been calculated, across space and frequencies
    (based on channel location).

    Arguments:
        electrodes: electrode pairs for which connectivity has been computed,
          list of tuples with channel indices, see indices_connectivity
          intrabrain function in toolbox (analyses).
        ch_con: connectivity matrix between sensors along space based on their
          position, scipy.sparse.csr_matrix of shape (n_channels, n_channels).
        freqs_mean: list of frequencies in the frequency-band-of-interest used
          by MNE for coherence spectral density calculation (connectivity indices).

    Returns:
        metaconn, metaconn_freq:

        - metaconn: a priori connectivity based on channel location, between
          pairs of channels for which connectivity indices have been calculated,
          matrix of shape (len(electrodes), len(electrodes)).

        - metaconn_freq: a priori connectivity between pairs of channels for which
          connectivity indices have been calculated, across space and
          frequencies, for merge data, matrix of shape
          (len(electrodes)*len(freqs_mean), len(electrodes)*len(freqs_mean)).
    """

    metaconn = np.zeros((len(electrodes), len(electrodes)))
    for ne1, (e11, e12) in enumerate(electrodes):
        for ne2, (e21, e22) in enumerate(electrodes):
            # print(ne1,e11,e12,ne2,e21,e22)
            metaconn[ne1, ne2] = (((ch_con[e11, e21]) and (ch_con[e12, e22])) or
                                  ((ch_con[e11, e22]) and (ch_con[e12, e21])) or
                                  ((ch_con[e11, e21]) and (e12 == e22)) or
                                  ((ch_con[e11, e22]) and (e12 == e21)) or
                                  ((ch_con[e12, e21]) and (e11 == e22)) or
                                  ((ch_con[e12, e22]) and (e11 == e21)))

    # duplicating the array 'freqs_mean' times to take channels connectivity
    # across neighboring frequencies into account
    l_freq = len(freqs_mean)

    init = np.zeros((l_freq*len(electrodes),
                     l_freq*len(electrodes)))
    for i in range(0, l_freq*len(electrodes)):
        for p in range(0, l_freq*len(electrodes)):
            if (p//len(electrodes) == i//len(electrodes)) or (p//len(electrodes) == i//len(electrodes) + 1) or (p//len(electrodes) == i//len(electrodes) - 1):
                init[i][p] = 1

    metaconn_mult = np.tile(metaconn, (l_freq, l_freq))
    metaconn_freq = np.multiply(init, metaconn_mult)

    # TODO: option with verbose
    # visualizing the array
    plt.spy(metaconn_freq)

    metaconn_matrixTuple = namedtuple(
        'metaconn_matrix', ['metaconn', 'metaconn_freq'])

    return metaconn_matrixTuple(
        metaconn=metaconn,
        metaconn_freq=metaconn_freq)
Example #25
import numpy as np
import scipy.sparse as sparse
import matplotlib.pylab as plt
a = np.loadtxt("stiff_2", skiprows=1)
b = sparse.csr_matrix(a)
plt.spy(b, markersize=1)
plt.show()
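plt.spy also takes a precision threshold, which helps when a matrix read from text contains tiny round-off entries. A variation on the snippet above using random data instead of the "stiff_2" file (illustrative only):

import numpy as np
import scipy.sparse as sparse
import matplotlib.pylab as plt

a = np.random.randn(30, 30) * (np.random.rand(30, 30) < 0.1)  # mostly-zero toy matrix
b = sparse.csr_matrix(a)
plt.spy(b, precision=1e-8, markersize=1)  # entries with |value| <= 1e-8 are hidden
plt.show()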
Example #26
print("Problem statistics:")
print("----------------------")
print("Number of variables: {:>25d}".format(nlp.nx))
print("Number of equality constraints: {:>14d}".format(nlp.nc))
print("Number of inequality constraints: {:>11d}".format(nlp.nd))
print("Total number of constraints: {:>17d}".format(nlp.ng))
print("Number of nnz in Jacobian: {:>20d}".format(nlp.nnz_jacobian_g))
print("Number of nnz in hessian of Lagrange: {:>8d}".format(nlp.nnz_hessian_lag))

x = nlp.x_init()
y = nlp.create_vector_y()
y.fill(1.0)

# Evaluate jacobian of all constraints
jac_g = nlp.jacobian_g(x)
plt.spy(jac_g)
plt.title('Jacobian of all the constraints\n')
plt.show()

# Evaluate jacobian of equality constraints
jac_c = nlp.jacobian_c(x)
plt.title('Jacobian of the equality constraints\n')
plt.spy(jac_c)
plt.show()

# Evaluate jacobian of inequality constraints
jac_d = nlp.jacobian_d(x)
plt.title('Jacobian of the inequality constraints\n')
plt.spy(jac_d)
plt.show()
Example #27
def showMatrix2(A):
    plt.spy(A)
    plt.show()
Example #28
features = binary_vectorizer.get_feature_names()
features[10000:10020]


# Spend some time to look at the binary vectoriser.
# 
# Examine the structure of X. Look at some the rows and columns values.

# In[22]:


# see the density of 0s and 1s in X
import scipy.sparse as sps
import matplotlib.pyplot as plt
plt.figure(figsize=(20,10))
plt.spy(X.toarray())
plt.show()


# Look at the sparse matrix above. Notice how some columns are quite dark (i.e. the words appear in almost every file). 
# 
# What are the 5 most common words?

# In[23]:


# your code here
value=X.toarray().sum(axis=0)
re=pd.Series(value)
re.index=features
re.sort_values(ascending=False)[0:5]
Example #29
        PP = assemble(inner(u, v) * dx - div(v) * p * dx)
        #        bc.apply(PP)
        PP = PP.sparray()
        X = PP[0:V.dim(), 0:V.dim()]
        Xdiag = X.diagonal()
        # PP = assemble(-div(v)*p*dx)
        # bc.apply(PP)
        # PP = PP.sparray()
        # Xdiag = X.sum(1).A
        # print Xdiag

        Bt = PP[0:V.dim(), V.dim():W.dim()]
        d = spdiags(1.0 / Xdiag, 0, len(Xdiag), len(Xdiag))
        dBt = (d * Bt).tocsr()

        plt.spy(dBt)
        plt.show()
        BQB = Bt.transpose() * dBt
        dBt = PETSc.Mat().createAIJ(size=dBt.shape,
                                    csr=(dBt.indptr, dBt.indices, dBt.data))
        print dBt.size
        BQB = PETSc.Mat().createAIJ(size=BQB.tocsr().shape,
                                    csr=(BQB.tocsr().indptr,
                                         BQB.tocsr().indices,
                                         BQB.tocsr().data))
        # parameters['linear_algebra_backend'] = 'PETSc'
        kspBQB = NSprecondSetup.Ksp(BQB)
    elif Solver == "PCD":
        N = FacetNormal(mesh)
        h = CellSize(mesh)
        h_avg = avg(h)
Example #30
File: NSold.py Project: wathen/PhD
        uu = Function(W)
        tic()
        PP,Pb = assemble_system(prec, L1,bcs)
        AA, bb = assemble_system(a, L1,  bcs)
        A,b = CP.Assemble(AA,bb)
        P = CP.Assemble(PP)
        u = b.duplicate()
        print toc()
        ff = assemble(f)
        bc.apply(ff)
        ff = ff.array()
        ff = ff[0:V.dim()]
        low_values_indices = np.abs(ff) < 1e-100
        print A
        # b = b.getSubVector(t_is)
        plt.spy(PP.sparray())
        # plt.savefig("plt1")
        F = assemble(fp)
        # bcp.apply(F)
        F = CP.Assemble(F)
#         # L = CP.Assemble(L)
        P = S.ExactPrecond(PP,QQ,L,F,[V,Q])
#         # bcp.apply(QQ)
        Mass = CP.Assemble(QQ)
#         # P = IO.matToSparse(P)
#         # plt.spy(P)
#         # plt.savefig("plt2")
#         # ss
#         # # P[W.dim()-1,W.dim()-1]  += 1
#         # P.assemblyBegin() # Make matrices useable.
#         # P.assemblyEnd()
Example #31
def plotOccupationNumberVarianceAndCondensateFraction(nrSites, nrBosons):
    plotX=[]
    plotOccupationNumberVariance=[]
    plotCondensateFraction=[]
    print "Setting up the vectors of the problem..."

    # nrSites=5
    # nrBosons=nrSites

    state = BHState(nrSites, nrBosons)
    allStates = getBasisAndExpectationValues(state)

    print "...Done. Setting up matrices..."

    H_int = [H_int_ofState(state.rep)] + [0] * (len(allStates)-1)
    H_kin_x = []
    H_kin_y = []
    H_kin_val = []

    j = 0
    H_kin_row(state.rep, allStates, j, H_kin_x, H_kin_y, H_kin_val)
    while state.next():
        j += 1
        if True:  # impose a condition, such as a maximum occupation number
            H_kin_row(state.rep, allStates, j, H_kin_x, H_kin_y, H_kin_val)
            H_int[j] = H_int_ofState(state.rep)

    H_i = dia_matrix(([H_int], [0]), shape=(len(H_int), len(H_int)))
    H_kin=coo_matrix((H_kin_val,(H_kin_x, H_kin_y)))
    plt.spy(H_kin)
    plt.show()
    # print "...Done. Getting eigenvalues..."
    # Add Parameters U, J here
    J=1.
    UoverJ=0.
    print "Calculating for U/J="
    while UoverJ<20:
        print "...", UoverJ
        plotX+=[UoverJ]
        U=UoverJ*J # J=1, but this is for correctness...
        vals, vecs = eigsh(U/2. * H_i -J*H_kin, which='SA', k=3)
    # print "...Done! Eigenvalues are:"
    # print vals
        gs=np.transpose(vecs)[0]
    # print gs
    # print "Occupation number variance (groundstate):"
        O_n=[]
        i=nrBosons
        while i>=0:
            O_n=O_n+[i]*dimension(nrBosons-i, nrSites-1)
            i-=1
        N_0=dia_matrix(([O_n],[0]), shape=(len(O_n), len(O_n)))
        N2_0=dia_matrix(([np.power(np.array(O_n),2)],[0]), shape=(len(O_n), len(O_n)))
        occupationNumberVariance=sqrt(np.transpose(gs).dot(N2_0.dot(gs))-pow(np.transpose(gs).dot(N_0.dot(gs)),2))
        plotOccupationNumberVariance+=[occupationNumberVariance]
    # print occupationNumberVariance
    # print "Computing SPDM..."
        SPDM=np.transpose(gs).dot(N_0.dot(gs))*np.identity(nrSites)
        for j in range(1, nrSites/2+1): # a_0^*a_0 is <N_0>, see above
            # get matrix a_0^*a_j
            mat=getCorrelationMatrix(j, state, allStates)
            temp = np.transpose(gs).dot(mat.dot(gs))
            for a in range(nrSites):
                SPDM[a, (a+j)%nrSites]=temp
                SPDM[a, (a-j)%nrSites]=temp
    # print "...Done. Eigenvalues of SPDM:"
        SPDMeig = np.linalg.eigvalsh(SPDM)
        plotCondensateFraction+=[np.nanmax(SPDMeig)/nrBosons]
        UoverJ+=1
        # print SPDM
        # print SPDMeig
    plt.plot(plotX, plotOccupationNumberVariance, "b-", label="Occupation Number Variance")
    plt.plot(plotX, plotCondensateFraction, "r-", label="Condensate Fraction")
    plt.legend()
    plt.show()
Example #32
def Poisson2D(M, quad):

    N = array([2**M, 2**(M)]) 
    L = array([2, 2*pi])
   
    kx = arange(N[0]).astype(float)
    ky = fftfreq(N[1], 1./N[1])

    Lp = array([2, 2*pi])/L
    K  = array(meshgrid(kx, ky, indexing='ij'), dtype=float)
    K[0] *= Lp[0]; K[1] *= Lp[1] 

    points, weights = ST.points_and_weights(N[0])
    x1 = arange(N[1], dtype=float)*L[1]/N[1]
    X = array(meshgrid(points, x1, indexing='ij'), dtype=float)

    Bmat = B_matrix(N[0])
    I2x, I2mx = QI2(N[0])
    Id = identity(N[0])
    Dx, D2, D4 = QIM(N[0], quad)

    p_hat = empty((N[0],N[1]), dtype="complex")
    u_hat = empty((N[0],N[1]), dtype="complex")
    #v_hat = empty((N[0],N[1]), dtype="complex")

    p = empty((N[0],N[1]))
    u = empty((N[0],N[1]))
    #v = empty((N[0],N[1]))

    u = -2.*sin(X[0]+X[1])
    #v = 2.*(1.-X[0]**2)
    u_hat = SC.fct(u,u_hat)
    #v_hat = SC.fct(v,v_hat)

    #v_hat *=1j*K[1]
    alpha = K[1, 0]**2
    alphaI = diag(alpha)

    lhs = I2x - dot(D2,alphaI)
    rhsR = dot(D2,u_hat.real)
    rhsI = dot(D2,u_hat.imag)
    print linalg.cond(D2[2:,:-2])
    #for i in range(N[1]):
        #l1 = dot(I2x, Dx)
        #l2 = dot(Bmat,Bmat)
        #l12 = dot(l1,l2)
        #l3 = dot(D2,Bmat)    

        #lhsR = dot(l12,u_hat[:,i].real) + dot(l3,v_hat[:,i].real)
        #lhsI = dot(l12,u_hat[:,i].imag) + dot(l3,v_hat[:,i].imag)

        #rhs = -alpha[i]*D2 + I2x
    pl.spy(lhs,precision=0.0000000001, markersize=3)
    pl.show()    
        #p_hat[:-2,i].real = linalg.solve(rhs[2:,:-2],lhsR[2:])
        #p_hat[:-2,i].imag = linalg.solve(rhs[2:,:-2],lhsI[2:])

    p_hat.real[:-3] = linalg.solve(lhs[3:,1:-2],rhsR[2:-1])
    p_hat.imag[:-3] = linalg.solve(lhs[3:,1:-2],rhsI[2:-1])    
    p = SC.ifct(p_hat, p)

    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt


    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X[0], X[1], p)

    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('p(x,y)')

    plt.show()
Example #33
    print 'Center person index not found!'
    sys.exit(0)


# Load adjacent matrix of the wiki-graph
# A = loadmat(path_to_data + 'W.mat')['W']
# A = loadmat(path_to_data + '/test/W.mat')['W']
# A = loadmat(path_to_data + 'A.mat')['A']
A = loadmat(path_to_data + 'A_6800.mat')['A']
args['matrix_pairwise_sim'] = get_similarity_matrix(A, 
                                                    coefficients = [3, 2, 1])
# Use some clustering algorithm

import matplotlib.pylab as plt

plt.spy(A, marker = '.', markersize = 0.05)
plt.savefig("/home/rdkl/all_matrix.png",bbox_inches='tight')
# t = clock()
# clusters = KMeans(A+A.T,args)
# print clock() - t

#central_person_cluster = min_sum_distance(A, args)
#titles = visualize(central_person_cluster, id2index_filename)

central_person_cluster, clusters = spectral_clustering(A[:100, :100], args)
# titles = visualize(central_person_cluster, id2index_filename)
# print titles
with open(path_to_data + center_person + '_' + str(args['number_of_clusters']) + '.txt', 'w') as f:
    for item in titles:
        print >>f, item
        print item 
Example #34
def spy(m, prec=.001, size=5):
    pl.spy(m,precision=prec, markersize=size)
    pl.show()
Example #35
import cantera as ct
import numpy as np
from matplotlib import pylab as pl

gas   = ct.Solution("../gri30.xml")
nspec = gas.n_species
nrxn  = gas.n_reactions
vcoef = gas.product_stoich_coeffs()+ \
        gas.reactant_stoich_coeffs()

ijac = []
for i in range(nspec):
  for j in range(nspec):
    for k in range(nrxn):
      if vcoef[i,k]!=0.0 and vcoef[j,k]!=0.0:
        ijac.append((i,j))
        break
  ijac.append((i,nspec))
for j in range(nspec+1):
  ijac.append((nspec,j))

J = np.zeros((nspec+1,nspec+1),dtype=int)
for i,ix in enumerate(ijac):
  J[ix]=1

pl.spy(J,markersize=1)
pl.show()

Example #36
             'radius':0.2 }

props = [ { 'radius' : 0.1 },
            { 'mu' : 1.0 },
            {} ]

warp = Warp(endpts, props, defaults, [10,10,10], MultiphysicsProblem)

warp.output_states("src/unit_tests/warp_{0}_.pvd",0)
warp.output_surfaces("src/unit_tests/warp_mesh_{0}_.pvd",0)

warp.create_contacts()

M = warp.assemble_form('AX','W')
from matplotlib import pylab as plt
plt.spy(M.array())
plt.show()

M,AX,AV = warp.assemble_forms(['M','AX','AV'],'W')
plt.spy(M.array())
plt.figure()
plt.spy(AX.array())
plt.figure()
plt.spy(AV.array())
plt.show()

plt.spy(warp.mcache['AX'].array())
plt.show()

F = warp.assemble_form('F','W')
print F
Example #37
File: test.py Project: wathen/PhD
# data = loadtxt("A_epetra.txt")
# col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
n = 1e6
A = scipy.sparse.rand(n, n, density=1/(n**1.4), format='csr')

# plt.spy(A)
# plt.show()
comm = Epetra.PyComm()
tic()
As = scipy_csr_matrix2CrsMatrix(A, comm)
print toc()
tic()
Ap = PETSc.Mat().createAIJ(size=A.shape,csr=(A.indptr, A.indices, A.data))
# Ap = io.arrayToMat(As)
print toc()

tic()
Anew = io.matToSparse(Ap)
# from petsc4py import PETSc as _PETSc
# data = As.getValuesCSR()
# (Istart,Iend) = As.getOwnershipRange()
# columns = As.getSize()[0]
# sparseSubMat = sps.csr_matrix(data[::-1],shape=(Iend-Istart,columns))
# comm = _PETSc.COMM_WORLD

# sparseSubMat = comm.tompi4py().allgather(sparseSubMat)
# A = sps.vstack(sparseSubMat)
print toc()

plt.spy(A)
plt.show()
Example #38
for i in range(n_scenarios):
    instance = create_basic_dense_qp(G, A, bs[i], c)

    nlp = PyomoNLP(instance)
    models.append(instance)
    scenario_name = "s{}".format(i)
    scenarios[scenario_name] = nlp
    coupling_vars[scenario_name] = [nlp.variable_idx(instance.x[0])]

nlp = TwoStageStochasticNLP(scenarios, coupling_vars)

x = nlp.x_init()
y = nlp.y_init()

jac_c = nlp.jacobian_c(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()

hess_lag = nlp.hessian_lag(x, y)
plt.spy(hess_lag.tofullmatrix())
plt.title('Hessian of the Lagrangian function\n')
plt.show()

kkt = BlockSymMatrix(2)
kkt[0, 0] = hess_lag
kkt[1, 0] = jac_c

plt.spy(kkt.tofullmatrix())
plt.title('KKT system\n')
plt.show()
Example #39
    eps = pow(10, -9)
    rank = estimate_rank(aslinearoperator(componentMatrix), eps)
    #print 'Approximate rank with relative error of(', eps, ')for numerical rank definition = ',rank

    print obj.data_file_list[i], obj.data_file_name[
        i], componentMatrix.shape, ' NonZeros = ', componentMatrix.nnz, ' 1-Norm = ', normMatrix, ' rank ', rank
    obj.logger_i.info(obj.data_file_list[i] + ' ' + obj.data_file_name[i] +
                      str(componentMatrix.shape) + ' NonZeros = ' +
                      str(componentMatrix.nnz) + ' 1-Norm = ' +
                      str(normMatrix) + ' rank ' + str(rank))

    #saving the sparsity pattern
    fig = plt.figure(figsize=(24.0, 15.0))
    fig.clf()
    fig.gca().add_artist(plt.spy(componentMatrix))
    brake.save(obj.output_path + 'dataAnalysis/' + obj.data_file_name[i],
               ext="png",
               close=True,
               verbose=False)

end_program = timeit.default_timer()

print "\n", "\n", "\n", 'Total Run Time = : ' + "%.2f" % (
    end_program - begin_program) + ' sec'

if (obj.log_level):
    obj.logger_i.info(
        '\nNote: The rank obtained using python estimate_rank utility')
    obj.logger_i.info(
        'is observed to be lower than the rank obtained using MATLAB')
Example #40
    a = inner(curl(v),curl(u))*dx
    m = inner(u,v)*dx
    b = inner(vMix,grad(pMix))*dx

    # <codecell>

    A = assemble(a)
    M = assemble(m)
    Ms = M.sparray()
    A = A.sparray()

    # # <codecell>

    B = assemble(b)
    B = B.sparray()[:V.dim(),W.dim()-Q.dim():]
    plt.spy(B.todense())
    # plt.show()
    # # <codecell>

    ksp = PETSc.KSP().create()
    # parameters['linear_algebra_backend'] = 'PETSc'
    M = assemble(m)
    M = CP.Assemble(M)
    ksp.setOperators(M)
    x = M.getVecLeft()
    ksp.setFromOptions()
    ksp.setType(ksp.Type.CG)
    ksp.setTolerances(1e-6)
    ksp.pc.setType(ksp.pc.Type.BJACOBI)

    # <codecell>
Example #41
    return m

instance = create_problem(0.0, 10.0)
# Discretize model using Orthogonal Collocation
discretizer = aml.TransformationFactory('dae.collocation')
discretizer.apply_to(instance, nfe=100, ncp=3, scheme='LAGRANGE-RADAU')
discretizer.reduce_collocation_points(instance, var=instance.u, ncp=1, contset=instance.t)

# Interface pyomo model with nlp
nlp = PyomoNLP(instance)
x = nlp.create_vector_x()
lam = nlp.create_vector_y()

# Evaluate jacobian
jac_c = nlp.jacobian_g(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()

# Evaluate hessian of the lagrangian
hess_lag = nlp.hessian_lag(x, lam)
plt.spy(hess_lag)
plt.title('Hessian of the Lagrangian function\n')
plt.show()

# Build KKT matrix
kkt = BlockSymMatrix(2)
kkt[0, 0] = hess_lag
kkt[1, 0] = jac_c
plt.spy(kkt.tocoo())
plt.title('KKT system\n')
Example #42
File: MHD.py Project: wathen/PhD
        if IterType == "CD":
            bb = assemble((Lmaxwell + Lns) - RHSform)
            for bc in bcs:
                bc.apply(bb)

            A,b = CP.Assemble(AA,bb)

            P = CP.Assemble(PP)
            print b
            u = b.duplicate()

        else:
            AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform,  bcs)
            Aa = AA.sparray()
            Aa.eliminate_zeros()
            plt.spy(Aa)
            plt.show()
            A,b = CP.Assemble(AA,bb)
            del AA
            F = assemble(fp)
            F = CP.Assemble(F)
            P = S.ExactPrecond(PP,Q,L,F,FSpaces)
            Mass = CP.Assemble(Q)

            u = b.duplicate()

        NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
        M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))

        kspNS = PETSc.KSP().create()
        kspM = PETSc.KSP().create()
Example #43
 def plot(self):
     matrix = coo_matrix(
         (numpy.ones(len(self._x)), (self._y - self._y.min(),
                                     self._x - self._x.min())))
     pylab.spy(matrix)
     pylab.show()
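The plot method above packs integer (x, y) point lists into a COO matrix so that pylab.spy renders them as a raster; the same trick works outside a class. A standalone sketch with made-up coordinates:

import numpy
import pylab
from scipy.sparse import coo_matrix

x = numpy.array([3, 7, 7, 12, 20])
y = numpy.array([5, 5, 9, 14, 2])
matrix = coo_matrix((numpy.ones(len(x)), (y - y.min(), x - x.min())))
pylab.spy(matrix, markersize=4)
pylab.show()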
Example #44
def metaconn_matrix_2brains(electrodes: list, ch_con: scipy.sparse.csr_matrix, freqs_mean: list, plot: bool = False) -> tuple:
    """
    Computes a priori connectivity across space and frequencies
    between pairs of channels for which connectivity indices have
    been calculated, to merge data (2 brains).

    Arguments:
        electrodes: electrode pairs for which connectivity indices have
          been computed, list of tuples with channel indices, see
          indices_connectivity_interbrain function in toolbox (analyses).
        ch_con: connectivity matrix between channels along space based on their
          position, scipy.sparse.csr_matrix of shape (n_channels, n_channels).
        freqs_mean: list of frequencies in the frequency-band-of-interest used
          by MNE for coherence spectral density calculation (connectivity indices).
        plot: option to plot the connectivity matrices, boolean.

    Note:
        It is assumed that there was no a priori connectivity
        between channels from the two participants.

    Returns:
        metaconn, metaconn_freq:

        - metaconn: a priori connectivity based on channel location, between
          pairs of channels for which connectivity indices have been calculated,
          to merge data, matrix of shape (len(electrodes), len(electrodes)).

        - metaconn_freq: a priori connectivity between pairs of channels for which
          connectivity indices have been calculated, across space and
          frequencies, to merge data, matrix of shape
          (len(electrodes)*len(freqs_mean), len(electrodes)*len(freqs_mean)).
    """

    n = np.max(electrodes, axis=0)[0]+1
    # n = 62
    metaconn = np.zeros((len(electrodes), len(electrodes)))
    for ne1, (e11, e12) in enumerate(electrodes):
        for ne2, (e21, e22) in enumerate(electrodes):
            # print(ne1,e11,e12,ne2,e21,e22)
            # considering no a priori connectivity between the 2 brains
            metaconn[ne1, ne2] = (((ch_con[e11, e21]) and (ch_con[e12-n, e22-n])) or
                                  ((ch_con[e11, e21]) and (e12 == e22)) or
                                  ((ch_con[e12-n, e22-n]) and (e11 == e21)) or
                                  ((e12 == e22) and (e11 == e21)))

    # duplicating the array 'freqs_mean' times to take channel connectivity
    # across neighboring frequencies into account
    l_freq = len(freqs_mean)

    init = np.zeros((l_freq*len(electrodes),
                     l_freq*len(electrodes)))
    for i in range(0, l_freq*len(electrodes)):
        for p in range(0, l_freq*len(electrodes)):
            if (p//len(electrodes) == i//len(electrodes)) or (p//len(electrodes) == i//len(electrodes) + 1) or (p//len(electrodes) == i//len(electrodes) - 1):
                init[i][p] = 1

    metaconn_mult = np.tile(metaconn, (l_freq, l_freq))
    metaconn_freq = np.multiply(init, metaconn_mult)

    if plot:
        # visualizing the array
        plt.figure()
        plt.spy(metaconn_freq)
        plt.title("Meta-connectivity matrix")

    metaconn_matrix_2brainsTuple = namedtuple(
        'metaconn_matrix_2brains', ['metaconn', 'metaconn_freq'])

    return metaconn_matrix_2brainsTuple(
        metaconn=metaconn,
        metaconn_freq=metaconn_freq)
Example #45
 def spy(self):
     pylab.spy(self.B)
     pylab.show()
Example #46
    def show_mat(sm):
#         print sm
        plt.spy(sm, marker='.', precision=0.1, markersize=5)
        plt.show()
Example #47
import pandas as pd
import numpy as np
from scipy import sparse
import scipy as sp
import matplotlib.pylab as plt

# read the interaction matrix

#interaction_sparse_stage = sparse.load_npz('data/interaction_v3.npz')
# interaction_sparse.data = np.nan_to_num(interaction_sparse.data, copy=False)


interaction_sparse_step = sparse.load_npz('data/interaction_v5.npz')
# interaction_sparse.data = np.nan_to_num(interaction_sparse.data, copy=False)

# interaction_sparse_stage is only loaded in the commented-out line above,
# and interaction_sparse is never defined, so plot the matrix that was loaded
# plt.spy(interaction_sparse_stage, precision='present', aspect='auto')
plt.spy(interaction_sparse_step, precision=0.1, aspect='auto', markersize=1, marker=',')
plt.show()
Example #48
    y = []
    for (i, j) in sorted(indx, key=indx.get):
        if j==1:
            n = indx[(i,j)]
            x.append(ns.N-i)
            y.append(ns.N*f[n]/ns.R0)

    assert(abs(1.0-sum(y)) < 1.0e-12)

    if ns.mean:
        print('sum %f' % (np.sum(np.array(y)),))
        print('mean %f' % (np.sum((1+np.array(x))*np.array(y)),))

    if ns.splot:
        pylab.figure()
        pylab.spy(B)
        pylab.title('Sparsity pattern')

    if ns.pplot:
        pylab.figure()
        pylab.plot(x, y)
        pylab.title('Probability mass function of outbreak sizes\nN=%d, a=%d, R0=%f (dimension %d)' % (ns.N, ns.a, ns.R0, len(indx)))

    print('# N=%d, a=%d, R0=%f, total dimension of matrix is %d' % (ns.N, ns.a, ns.R0, len(indx)))
    for i in range(len(x)):
        print('%d, %f' % (int(x[i]), y[i]))

    if ns.splot or ns.pplot:
        pylab.show()

    sys.exit(0)
Example #49
def get_smamin_rearrangement(N, PrP, V=None, Q=None, invinds=None, nu=None,
                             Pdof=None, M=None, A=None, B=None, mesh=None,
                             addnedgeat=None,
                             scheme='TH', fullB=None, crinicell=None):
    from smamin_utils import col_columns_atend
    from scipy.io import loadmat, savemat
    """ rearrange `B` and `M` for smart minimal extension

    and return the indices of the ext. nodes

    Parameters
    ----------
    scheme : {'TH', 'CR'}
        toggle the scheme

         * 'TH' : Taylor-Hood
         * 'CR' : Crouzeix-Raviart

    crinicell : int, optional
        the starting cell for the 'CR' scheme, defaults to `0`
    addnedge : int, optional
        whether to add a Neumann edge in the CR scheme, defaults to `None`

    Returns
    -------
    MSmeCL : (N,N) sparse matrix
        the rearranged mass matrix (columns and lines swapped)
    BSme : (K, N) sparse matrix
        the rearranged divergence matrix (columns swapped)
    B2Inds : (K, ) array
        indices of the nodes corresponding to the minimal extension
        w.r.t all nodes of the velocity space
    B2BoolInv : (N, ) boolean array
        mask of the ext. nodes w.r.t. the inner nodes in V,
        e.g. `v2 = v[B2BoolInv]`
    B2BI : (K, ) int array
        indices of the ext. nodes w.r.t the inner nodes in V
    """
    Q = PrP.Q if Q is None else Q
    V = PrP.V if V is None else V
    invinds = PrP.invinds if invinds is None else invinds
    nu = PrP.nu if nu is None else nu
    mesh = PrP.mesh if mesh is None else mesh
    Pdof = PrP.Pdof if Pdof is None and PrP is not None else Pdof

    if scheme == 'TH':
        print 'solving index 1 -- with TH scheme'
        dname = 'mats/SmeMcBc_N{0}nu{1}_TH'.format(N, nu)
        get_b2inds_rtn = get_B2_bubbleinds
        args = dict(N=N, V=V, mesh=mesh)
    elif scheme == 'CR':
        print 'solving index 1 -- with CR scheme'
        dname = 'mats/SmeMcBc_N{0}nu{1}_CR'.format(N, nu)
        # pressure-DoF of B_matrix NOT removed yet!
        get_b2inds_rtn = get_B2_CRinds
        args = dict(N=N, V=V, mesh=mesh, Q=Q, inicell=crinicell,
                    B_matrix=B, invinds=invinds)

    try:
        SmDic = loadmat(dname)
        pdoflist = loadmat(dname+'pdoflist')  # TODO enable saving again

    except IOError:
        print 'Computing the B2 indices...'
        # get the indices of the B2-part
        B2Inds, pdoflist = get_b2inds_rtn(**args)
        if addnedgeat is not None:
            # TODO: hard coded filtering of the needed V bas func
            # list of columns that have a nnz at cell #addnedge
            potcols = fullB[addnedgeat, :].indices
            for col in potcols:
                # TODO here we need B
                if fullB[:, col].nnz == 1:
                    coltoadd = col
                    break
            B2Inds = np.r_[coltoadd, B2Inds]
            # end TODO

        # the B2 inds wrt to inner nodes
        # this gives a masked array of boolean type
        B2BoolInv = np.in1d(np.arange(V.dim())[invinds], B2Inds)
        # this as indices
        B2BI = np.arange(len(B2BoolInv), dtype=np.int32)[B2BoolInv]
        # Reorder the matrices for smart min ext...
        # ...the columns
        print 'Rearranging the matrices...'
        # Reorder the matrices for smart min ext...
        # ...the columns
        MSmeC = col_columns_atend(M, B2BI)
        BSme = col_columns_atend(B, B2BI)
        # ...and the lines
        MSmeCL = col_columns_atend(MSmeC.T, B2BI)
        if A is not None:
            ASmeC = col_columns_atend(A, B2BI)
            ASmeCL = (col_columns_atend(ASmeC.T, B2BI)).T

        print 'done'

        savemat(dname, {'MSmeCL': MSmeCL,
                        'ASmeCL': ASmeCL,
                        'BSme': BSme,
                        'B2Inds': B2Inds,
                        'B2BoolInv': B2BoolInv,
                        'B2BI': B2BI})
        if scheme == 'CR':
            savemat(dname+'pdoflist', {'pdoflist': pdoflist})

    SmDic = loadmat(dname)

    MSmeCL = SmDic['MSmeCL']
    ASmeCL = SmDic['ASmeCL']
    BSme = SmDic['BSme']
    B2Inds = SmDic['B2Inds']
    B2BoolInv = SmDic['B2BoolInv'] > 0
    B2BoolInv = B2BoolInv.flatten()
    B2BI = SmDic['B2BI']
    if scheme == 'CR':
        pdoflist = loadmat(dname+'pdoflist')['pdoflist']
    else:
        pdoflist = None
    only_check_cond = False
    if only_check_cond:
        print 'Scheme is ', scheme
        import matplotlib.pylab as pl
        if Pdof is None:
            B2 = BSme[:, :][:, -B2Inds.size:]
            B2res = fullB[pdoflist.flatten(), :][:, B2Inds.flatten()]
            print 'condition number is ', npla.cond(B2res.todense())
            # B2res = BSme[pdoflist.flatten(), :][:, -B2Inds.size:]
            pl.figure(2)
            pl.spy(B2res)  # [:100, :][:, :100])
        elif Pdof == 0:
            B2 = BSme[1:, :][:, -B2Inds.size:]
            print 'condition number is ', npla.cond(B2.todense())
        else:
            raise NotImplementedError()
        print 'N is ', N
        print 'B2 shape is ', B2.shape
        pl.figure(1)
        pl.spy(B2)
        pl.show(block=False)
        import sys
        sys.exit('done')

    if fullB is not None and only_check_cond:
        fbsme = col_columns_atend(fullB, B2Inds.flatten())
        import matplotlib.pylab as pl
        pl.figure(2)
        pl.spy(fbsme)
        pl.show(block=False)
        fbsmec = fbsme[0:, :][:, -B2Inds.size:]
        pl.figure(3)
        pl.spy(fbsmec)
        pl.show(block=False)
        if pdoflist is not None:
            linelist = []
            for pdof in pdoflist.flatten().tolist()[1:]:
                linelist.append(fbsmec[pdof, :])
            fbsmecr = sps.vstack(linelist)
        pl.figure(4)
        pl.spy(fbsmecr)
        pl.show(block=False)

        print 'condition number is ', npla.cond(fbsmecr.T.todense())
        print 'N is ', N

    if A is None:
        return MSmeCL, BSme, B2Inds, B2BoolInv, B2BI
    else:
        return MSmeCL, ASmeCL, BSme, B2Inds, B2BoolInv, B2BI
Example #50
    # aa
    diagAA = AA.array()
    print toc()
    # print diagAAdd
    # diagAA = AA.diagonal()
    BClagrange = numpy.abs(diagAA[Magnetic.dim():]) > 1e-3
    BCmagnetic = numpy.abs(diagAA[:Magnetic.dim()]) > 1e-3
    onelagrange = numpy.ones(Lagrange.dim())
    onemagnetic = numpy.ones(Magnetic.dim())
    onelagrange[BClagrange] = 0
    onemagnetic[BCmagnetic] = 0
    Diaglagrange = spdiags(onelagrange,0,Lagrange.dim(),Lagrange.dim())
    Diagmagnetic = spdiags(onemagnetic,0,Magnetic.dim(),Magnetic.dim())
    # plt.spy(C*Diag)
    # plt.figure()
    plt.spy(Diagmagnetic)
    # plt.show()

    tic()
    C = Diagmagnetic*C*Diaglagrange
    print "BC applied to gradient, time: ", toc()
    # print C
    # BC = ~numpy.array(BC)
    # tic
    # CC = C
    # ii,jj = C[:,BC].nonzero()
    # C[ii,jj] = 0
    # print (CC-C).todense()
    # # print C
    # Curl = assemble(inner(curl(u),curl(v))*dx)
    # Mass = assemble(inner(u,v)*dx)
Example #51
def plotMatrix(A):
   import matplotlib.pylab as pl
   pl.spy(A,markersize=1)
   pl.show()
Example #52
mapdl = launch_mapdl()
mm = mapdl.math

################################################################################
# Load and solve verification manual example 153.  Then load the
# stiffness matrix into APDLmath.
out = mapdl.input(vmfiles["vm153"])
k = mm.stiff(fname="PRSMEMB.full")
k

################################################################################
# Copy this APDLMath Sparse Matrix to a SciPy CSR matrix and plot the
# graph of the sparse matrix
pk = k.asarray()
plt.spy(pk)

################################################################################
# You can access the 3 vectors that describe this sparse matrix with.
#
# - ``pk.data``
# - ``pk.indices``
# - ``pk.indptr``
#
# See the ``scipy`` documentation of the csr matrix at `scipy.sparse.csr_matrix <https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html>`_ for additional details.

print(pk.data[:10])
print(pk.indices[:10])
print(pk.indptr[:10])

################################################################################
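As a quick check of the CSR layout described above, the three arrays can be fed straight back into SciPy to rebuild a copy of the matrix. A sketch, assuming pk is the SciPy CSR matrix obtained from k.asarray():

import scipy.sparse as sps

pk2 = sps.csr_matrix((pk.data, pk.indices, pk.indptr), shape=pk.shape)
print(abs(pk - pk2).max())  # 0.0 -> the round trip reproduces the matrix exactly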
Example #53
    SolutionTime = 0
    while eps > tol and iter < maxiter:
        iter += 1
        x = Function(W)

        uu = Function(W)
        tic()
        PP,Pb = assemble_system(prec, L1,bcs)
        AA, bb = assemble_system(a, L1 - RHSform,  bcs)
        A,b = CP.Assemble(AA,bb)
        P = CP.Assemble(PP)
        u = b.duplicate()
        print toc()
        print A
        # b = b.getSubVector(t_is)
        plt.spy(PP.sparray())
        plt.savefig("plt1")
        F = assemble(fp)
        F = CP.Assemble(F)
        # L = CP.Assemble(L)
        P = S.ExactPrecond(PP,QQ,L,F,[V,Q])
        Mass = CP.Assemble(QQ)
        # P = IO.matToSparse(P)
        # plt.spy(P)
        # plt.savefig("plt2")

        kspNS = PETSc.KSP().create()
        kspNS.setTolerances(1e-5)
        kspNS.setOperators(P)
        # A.destroy()
        # P.destroy()