Example #1
def poisson2d_sym_blk(n):
    L = spmatrix.ll_mat_sym(n*n)
    I = spmatrix.ll_mat_sym(n)
    P = spmatrix.ll_mat_sym(n)
    for i in range(n):
        I[i,i] = -1
    for i in range(n):
        P[i,i] = 4
        if i > 0: P[i,i-1] = -1
    for i in range(0, n*n, n):
        L[i:i+n,i:i+n] = P
        if i > 0: L[i:i+n,i-n:i] = I
    return L
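A minimal usage sketch (added for illustration, not part of the original example), assuming "from pysparse import spmatrix, precon, itsolvers" and "import numpy": the symmetric ll_mat built above is converted to SSS format and handed to PCG, following the same call pattern used in the later examples.

n = 50
L = poisson2d_sym_blk(n)     # symmetric ll_mat; only the lower triangle is stored
A = L.to_sss()               # SSS format expected by the iterative solvers
b = numpy.ones(n*n)          # right-hand side
x = numpy.empty(n*n)         # solution vector, filled in place by pcg
K = precon.ssor(A)           # SSOR preconditioner, as in the MINRES examples below
info, iters, relres = itsolvers.pcg(A, b, x, 1e-8, 2000, K)
print info, iters, relres    # info >= 0 indicates convergence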
Example #3
def poisson2d_vec_sym_blk(n):
    n2 = n * n
    L = spmatrix.ll_mat_sym(n2, 3 * n2 - 2 * n)
    D = spmatrix.ll_mat_sym(n, 2 * n - 1)
    d = numpy.arange(n, dtype=numpy.int)
    D.put(4.0, d)
    D.put(-1.0, d[1:], d[:-1])
    P = spmatrix.ll_mat_sym(n, n - 1)
    P.put(-1, d)
    for i in xrange(n - 1):
        L[i * n:(i + 1) * n, i * n:(i + 1) * n] = D
        L[(i + 1) * n:(i + 2) * n, i * n:(i + 1) * n] = P
    # Last diagonal block
    L[-n:, -n:] = D
    return L
Example #4
def poisson2d_vec_sym_blk(n):
    n2 = n*n
    L = spmatrix.ll_mat_sym(n2, 3*n2-2*n)
    D = spmatrix.ll_mat_sym(n, 2*n-1)
    d = numpy.arange(n, dtype=numpy.int)
    D.put(4.0, d)
    D.put(-1.0, d[1:], d[:-1])
    P = spmatrix.ll_mat_sym(n, n-1)
    P.put(-1,d)
    for i in xrange(n-1):
        L[i*n:(i+1)*n, i*n:(i+1)*n] = D
        L[(i+1)*n:(i+2)*n, i*n:(i+1)*n] = P
    # Last diagonal block
    L[-n:,-n:] = D
    return L
Example #5
    def __init__(self, **kwargs):

        nrow = kwargs.get('nrow', 0)
        ncol = kwargs.get('ncol', 0)
        bandwidth = kwargs.get('bandwidth', 0)
        matrix = kwargs.get('matrix', None)
        sizeHint = kwargs.get('sizeHint', 0)
        symmetric = 'symmetric' in kwargs and kwargs['symmetric']
        size = kwargs.get('size',0)
        if size > 0:
            if nrow > 0 or ncol > 0:
                if size != nrow or size != ncol:
                    msg = 'size argument was given but does not match '
                    msg += 'nrow and ncol'
                    raise ValueError(msg)
            else:
                nrow = ncol = size

        if matrix is not None:
            self.matrix = matrix
        else:
            if symmetric and nrow==ncol:
                if sizeHint is None:
                    sizeHint = nrow
                    if bandwidth > 0:
                        sizeHint += 2*(bandwidth-1)*(2*nrow-bandwidth-2)
                self.matrix = spmatrix.ll_mat_sym(nrow, sizeHint)
            else:
                if sizeHint is None:
                    sizeHint = min(nrow,ncol)
                    if bandwidth > 0:
                        sizeHint = bandwidth * (2*sizeHint-bandwidth-1)/2
                self.matrix = spmatrix.ll_mat(nrow, ncol, sizeHint)
Example #6
def poisson1d_sym_vec(n):
    L = spmatrix.ll_mat_sym(n, 2*n-1)
    e = numpy.ones(n)
    d = numpy.arange(n, dtype=numpy.int)
    L.put(2*e, d, d)
    L.put(-e[1:], d[1:], d[:-1])
    return L
Example #7
def poisson2d_sym_blk_vec(n):
    n2 = n*n
    L = spmatrix.ll_mat_sym(n2, 3*n2-2*n)
    D = spmatrix.ll_mat_sym(n, 2*n-1)
    e = numpy.ones(n)
    d = numpy.arange(n, dtype=numpy.int)
    D.put(4*e, d, d)
    D.put(-e[1:], d[1:], d[:-1])
    P = spmatrix.ll_mat(n, n, n-1)
    P.put(-e,d,d)
    for i in xrange(n-1):
        L[i*n:(i+1)*n, i*n:(i+1)*n] = D
        L[(i+1)*n:(i+2)*n, i*n:(i+1)*n] = P
    # Last diagonal block
    L[n2-n:n2, n2-n:n2] = D
    return L
Example #8
def main():
    alpha = float(sys.argv[1])
    graph = Graph.Read_GraphMLz("wiki.graphmlz")
    graph.to_undirected()
    self_loops = [edge.index for edge in graph.es if edge.source == edge.target]
    graph.delete_edges(self_loops)
    
    degree = graph.degree()
    size = len(graph.vs)

    #create X(u)
    chi_u = zeros(size)
    chi_u[int(random.random()*(size-1))] = 1.0

    #transform walk and X(u) to find personal pagerank
    walk = get_walk(graph, degree)
    walk *= (1-alpha)
    walk = identity(size) - walk
    chi_u *= alpha

    #find p(u)
    A = spmatrix.ll_mat_sym(size)
    for i in range(size):
        for j in range(size):
            if j <= i:
                A[i,j] = walk[i][j]
    walk = None
    p_u = numpy.empty(size)
    info, iter_iter, relres = pcg(A.to_sss(),chi_u,p_u,1e-12,10000)
    A = None
    print "%s : %s : %s" % (info, iter_iter, relres)

    #find conductance by creating q(u) = p(u) / d(u)
    q_u = p_u.copy()
    for index in range(len(q_u)):
        q_u[index] /= degree[index]
    sort_q = list(argsort(q_u))
    sort_q.reverse()
    report_conductance(graph, sort_q, alpha)

    #find set T from sorted p(u)
    well_spread = True
    sort_p = list(argsort(p_u))
    sort_p.reverse()

    #calculate limit val
    d_T = 0.0
    d_V = sum(degree)
    for i in range(size/2):
        d_T += degree[sort_p[i]]
    limit = (3.0*d_T) / (2.0*d_V)

    #report if p(u) is well spread
    for i in range(size/2):
        if p_u[sort_p[i]] > limit:
            well_spread = False
            break

    print "p(u) with alpha %s is well spread? %s" % (alpha, well_spread)
Example #9
def SpecSheet(n=10000):
    """
    Implement the example from the GLTR spec sheet
    """
    g = np.ones(n)
    H = spmatrix.ll_mat_sym(n, 2*n-1)
    for i in range(n): H[i,i] = -2
    for i in range(1, n): H[i,i-1] = 1
    return (H, g)
Example #10
def poisson2d_vec_sym(n):
    n2 = n * n
    L = spmatrix.ll_mat_sym(n2, 3 * n2 - 2 * n)
    d = numpy.arange(n2, dtype=numpy.int)
    L.put(4.0, d)
    L.put(-1.0, d[n:], d[:-n])
    for i in xrange(n):
        di = d[i * n:(i + 1) * n]
        L.put(-1.0, di[:-1], di[1:])
    return L
Example #11
def poisson2d_vec_sym(n):
    n2 = n*n
    L = spmatrix.ll_mat_sym(n2, 3*n2-2*n)
    d = numpy.arange(n2, dtype=numpy.int)
    L.put(4.0, d)
    L.put(-1.0, d[n:], d[:-n])
    for i in xrange(n):
        di = d[i*n:(i+1)*n]
        L.put(-1.0, di[:-1], di[1:])
    return L
Example #12
def poisson2d_sym(n):
    L = spmatrix.ll_mat_sym(n*n)
    for i in range(n):
        for j in range(n):
            k = i + n*j
            L[k,k] = 4
            if i > 0:
                L[k,k-1] = -1
            if j > 0:
                L[k,k-n] = -1
    return L
Example #14
def SpecSheet(n=10000):
    """
    Implement the example from the GLTR spec sheet
    """
    g = np.ones(n)
    H = spmatrix.ll_mat_sym(n, 2 * n - 1)
    for i in range(n):
        H[i, i] = -2
    for i in range(1, n):
        H[i, i - 1] = 1
    return (H, g)
Example #15
    def compute_scaling_cons(self, x=None, g_max=1.0e2, reset=False):
        """
        Compute constraint scaling.

        :parameters:

            :x: Determine scaling by evaluating functions at this
                point. Default is to use :attr:`self.x0`.
            :g_max: Maximum allowed gradient. Default: :attr:`g_max = 1e2`.
            :reset: Set to `True` to unscale the problem.
        """
        # Remove scaling if requested
        if reset:
            self.scale_con = None
            self.Lcon = self.model.get_Lcon()  # lower bounds on constraints
            self.Ucon = self.model.get_Ucon()  # upper bounds on constraints
            return

        # Quick return if the problem is already scaled
        if self.scale_con is not None:
            return

        m = self.m
        if x is None: x = self.x0
        d_c = np.empty(m, dtype=np.double)
        J = self.jac(x)

        # Find inf-norm of each row of J
        gmaxNorm = 0  # holds the maximum row-norm of J
        imaxNorm = 0  # holds the corresponding index
        for i in xrange(m):
            giNorm = J[i, :].norm('1')  # This is the matrix 1-norm (max abs col)
            d_c[i] = g_max / max(g_max, giNorm)  # <= 1 always
            if giNorm > gmaxNorm:
                gmaxNorm = giNorm
                imaxNorm = i
            gmaxNorm = max(gmaxNorm, giNorm)

        self.scale_con = d_c

        # Scale constraint bounds: componentwise multiplications
        self.Lcon *= d_c  # lower bounds on constraints
        self.Ucon *= d_c  # upper bounds on constraints

        # Form a diagonal matrix from scales; useful for scaling Jacobian
        D_c = spmatrix.ll_mat_sym(m, m)
        D_c.put(d_c, range(m))
        self.scale_con_diag = D_c

        # Return largest row norm and its index

        return (imaxNorm, gmaxNorm)
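As a concrete illustration of the scaling rule above (numbers are hypothetical): with the default g_max = 1.0e2, a constraint row with norm 1.0e4 gets d_c[i] = 1e2 / max(1e2, 1e4) = 1e-2, so its scaled norm drops to 1e2, while a row whose norm is already below g_max keeps d_c[i] = 1 and is left unchanged.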
Example #16
    def compute_scaling_cons(self, x=None, g_max=1.0e2, reset=False):
        """
        Compute constraint scaling.

        :parameters:

            :x: Determine scaling by evaluating functions at this
                point. Default is to use :attr:`self.x0`.
            :g_max: Maximum allowed gradient. Default: :attr:`g_max = 1e2`.
            :reset: Set to `True` to unscale the problem.
        """
        # Remove scaling if requested
        if reset:
            self.scale_con = None
            self.Lcon = self.model.get_Lcon()  # lower bounds on constraints
            self.Ucon = self.model.get_Ucon()  # upper bounds on constraints
            return

        # Quick return if the problem is already scaled
        if self.scale_con is not None:
            return

        m = self.m
        if x is None: x = self.x0
        d_c = np.empty(m, dtype=np.double)
        J = self.jac(x)

        # Find inf-norm of each row of J
        gmaxNorm = 0            # holds the maximum row-norm of J
        imaxNorm = 0            # holds the corresponding index
        for i in xrange(m):
            giNorm = J[i,:].norm('1') # This is the matrix 1-norm (max abs col)
            d_c[i] = g_max / max(g_max, giNorm) # <= 1 always
            if giNorm > gmaxNorm:
                gmaxNorm = giNorm
                imaxNorm = i
            gmaxNorm = max(gmaxNorm, giNorm)

        self.scale_con = d_c

        # Scale constraint bounds: componentwise multiplications
        self.Lcon *= d_c        # lower bounds on constraints
        self.Ucon *= d_c        # upper bounds on constraints

        # Form a diagonal matrix from scales; useful for scaling Jacobian
        D_c = spmatrix.ll_mat_sym(m, m)
        D_c.put(d_c, range(m))
        self.scale_con_diag = D_c

        # Return largest row norm and its index

        return (imaxNorm, gmaxNorm)
Example #17
 def set_problem_dimensions(self, x_size, y_size, z_size, epul = 1):
     """ Defines the problem size. The sizes can either be defined as a
     a number of elements, or alternatively in physical dimensions, in which
     case epul is used to set the number of elements per unit length. The
     latter makes it easy to change the number of elements
     
     Note: Currently epul is not used for anything, the idea is to later use
     it so fixing and loading of nodes can be applied to physical dimensions
     so the resolution can easily be changed by chaning epul
     
     Parameters
     ----------
     x_size : int
         Size of the problem in x-direction
     y_size : int
         Size of the problem in y-direction
     z_size : int
         Size of the problem in z-direction
     epul: float
         Elements per unit length
     """
     self.num_elements = np.array([x_size, y_size, z_size])*epul
     self.total_elements = self.num_elements[0]*self.num_elements[1]*self.num_elements[2]
     self.num_nodes = self.num_elements + 1
     
     self.topy_dict['NUM_ELEM_X'] = self.num_elements[0]
     self.topy_dict['NUM_ELEM_Y'] = self.num_elements[1]    
     self.topy_dict['NUM_ELEM_Z'] = self.num_elements[2]   
     
     self.topy_dict['E2SDOFMAPI'] = _e2sdofmapinit(self.num_elements[0],
                                                   self.num_elements[1],
                                                   self.dof)
     
     nodes = np.arange(self.num_nodes[0]*self.num_nodes[1]*\
             self.num_nodes[2])
     Ksize = len(nodes)*self.dof                
     
     self.topy_dict['K'] = spmatrix.ll_mat_sym(Ksize, Ksize) #  Global stiffness matrix   
     
     # Create node grids:
     self.nx, self.ny, self.nz = np.indices(self.num_nodes)
     self.nodes = self.ny+self.nx*self.num_nodes[1]+\
                     self.nz*self.num_nodes[1]*self.num_nodes[0]
     
     # Create array with element numbers
     elements = np.arange(self.total_elements)
     self.elements =  elements.reshape(self.num_elements, order = 'F')
     self.element_indices = np.indices(self.num_elements)
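A worked illustration of the sizing above (hypothetical values, assuming self.dof = 3): set_problem_dimensions(4, 3, 2, epul=1) gives num_elements = [4, 3, 2], total_elements = 24 and num_nodes = [5, 4, 3], so Ksize = 5 * 4 * 3 * 3 = 180 and the global stiffness matrix K is allocated as a symmetric 180 x 180 ll_mat.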
Example #18
 def setUp(self):
     import numpy
     
     self.n = 30
     self.P = poisson.poisson1d(self.n)
     for i in range(self.n):
         self.P[i,i] = 4.0
     self.A = poisson.poisson2d(self.n)
     self.S = poisson.poisson2d_sym(self.n)
     self.I = spmatrix.ll_mat_sym(self.n)
     for i in range(self.n):
         self.I[i,i] = -1.0
     self.mask = numpy.zeros(self.n**2, 'l')
     self.mask[self.n/2*self.n:(self.n/2 + 1)*self.n] = 1
     self.mask1 = numpy.zeros(self.n**2, 'l')
     self.mask1[(self.n/2 + 1)*self.n:(self.n/2 + 2)*self.n] = 1
Example #19
def Hilbert(n):
    """
    The cream of ill conditioning: the Hilbert matrix.  See Higham,
    "Accuracy and Stability of Numerical Algoriths", section 28.1.
    The matrix has elements H(i,j) = 1/(i+j-1) when indexed
    i,j=1..n.  However, here we index as i,j=0..n-1, so the elements
    are H(i,j) = 1/(i+j+1).
    """
    if n <= 0: return None
    if n == 1: return 1.0
    nnz = n * (n - 1)/2
    H = spmatrix.ll_mat_sym(n, nnz)
    for i in range(n):
        for j in range(i+1):
            H[i,j] = 1.0/(i+j+1)
    return H
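A quick hedged check of the indexing convention in the docstring (added for illustration):

H = Hilbert(3)
print H[0, 0], H[1, 0], H[2, 0]   # 1.0, 0.5, 0.333... since H(i,j) = 1/(i+j+1)
print H[2, 2]                     # 0.2, i.e. the (3,3) entry 1/(3+3-1) in 1-based indexing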
Example #20
def poisson2d_sym_vec(n):
    n2 = n*n
    L = spmatrix.ll_mat_sym(n2, 3*n2-2*n)
    e = numpy.ones(n)
    d = numpy.arange(n, dtype=numpy.int)
    din = d
    for i in xrange(n):
        # Diagonal blocks
        L.put(4*e, din, din)
        L.put(-e[1:], din[1:], din[:-1])
        # Outer blocks
        L.put(-e, n+din, din)
        din = d + i*n
    # Last diagonal block
    L.put(4*e, din, din)
    L.put(-e[1:], din[1:], din[:-1])
    return L
Example #21
def Ma27SpecSheet():
    # This is the example from the MA27 spec sheet
    # Solution should be [1,2,3,4,5]
    A = spmatrix.ll_mat_sym(5, 7)
    A[0,0] = 2
    A[1,0] = 3
    A[2,1] = 4
    A[2,2] = 1
    A[3,2] = 5
    A[4,1] = 6
    A[4,4] = 1

    rhs = numpy.ones(5, 'd')
    rhs[0] = 8
    rhs[1] = 45
    rhs[2] = 31
    rhs[3] = 15
    rhs[4] = 17

    return (A, rhs)
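A hedged sanity check (my addition, not from the original source): multiplying the documented solution [1, 2, 3, 4, 5] by A should reproduce rhs, since matvec on a symmetric ll_mat applies the full matrix (as in the rhs = K*e construction further below).

A, rhs = Ma27SpecSheet()
y = numpy.empty(5)
A.matvec(numpy.array([1.0, 2.0, 3.0, 4.0, 5.0]), y)
print y      # expected: [  8.  45.  31.  15.  17.], equal to rhs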
Example #22
File: flow.py Project: dougvk/CS462
def main():
    graph = Graph.Read_GraphMLz("wiki.graphmlz")
    graph.to_undirected()
    self_loops = [edge.index for edge in graph.es if edge.source == edge.target]
    graph.delete_edges(self_loops)

    percentage = float(sys.argv[1])
    size = len(graph.vs)

    #create set W, with |W| = percentage*|V|
    W = set([])
    while len(W) < percentage*size:
        random_node = random.random()*(size-1)
        W.add(int(random_node))
    W = list(W)

    #create Laplacian of graph
    L = get_L(graph, size)

    #create b(x) -- flow coming out of set W
    b = get_flow(graph, W, size)

    #solve for u(x)
    A = spmatrix.ll_mat_sym(size)
    for i in range(size):
        for j in range(size):
            if j <= i:
                A[i,j] = L[i][j]
    L = None  # free the Laplacian; it is no longer needed
    u_x = numpy.empty(size)
    info, iter_iter, relres = pcg(A.to_sss(),b,u_x,1e-12,10000)
    A = None
    print "%s : %s : %s" % (info, iter_iter, relres)

    #calculate error in between two functions
    error = 0
    for node in graph.vs:
        error += pow(int(node["original_num"]) - u_x[node.index], 2)
    error = sqrt(error)
    print error
Example #23
def ma27_spec_sheet():
    """This is the example from the MA27 spec sheet.

    Solution should be [1,2,3,4,5].
    """
    A = spmatrix.ll_mat_sym(5, 7)
    A[0, 0] = 2
    A[1, 0] = 3
    A[2, 1] = 4
    A[2, 2] = 1
    A[3, 2] = 5
    A[4, 1] = 6
    A[4, 4] = 1

    rhs = np.ones(5, 'd')
    rhs[0] = 8
    rhs[1] = 45
    rhs[2] = 31
    rhs[3] = 15
    rhs[4] = 17

    return (A, rhs)
Example #24
 def testSubmatrix(self):
     n = self.n
     Psym = poisson.poisson1d_sym(n)
     P = poisson.poisson1d(n)
     for i in range(n):
         P[i,i] = 4.0
         Psym[i,i] = 4.0
     # read and test diagonal blocks
     for i in range(n):
         self.failUnless(llmat_isEqual(self.A[n*i:n*(i+1),n*i:n*(i+1)], P))
         self.failUnless(llmat_isEqual(self.S[n*i:n*(i+1),n*i:n*(i+1)], P))
         self.failUnless(llmat_isEqual(self.A[n*i:n*(i+1),n*i:n*(i+1)], Psym))
         self.failUnless(llmat_isEqual(self.S[n*i:n*(i+1),n*i:n*(i+1)], Psym))
     # store and get diagonal blocks
     R = spmatrix_util.ll_mat_rand(n*n, n*n, 0.01) # random matrix
     for i in range(n):
         R[n*i:n*(i+1),n*i:n*(i+1)] = P
         self.failUnless(llmat_isEqual(R[n*i:n*(i+1),n*i:n*(i+1)], P))
         R[n*i:n*(i+1),n*i:n*(i+1)] = Psym
         self.failUnless(llmat_isEqual(R[n*i:n*(i+1),n*i:n*(i+1)], Psym))
     # store and get off-diagonal blocks
     for i in range(n-1):
         R[n*i:n*(i+1),n*(i+1):n*(i+2)] = P
         self.failUnless(llmat_isEqual(R[n*i:n*(i+1),n*(i+1):n*(i+2)], P))
         R[n*i:n*(i+1),n*(i+1):n*(i+2)] = Psym
         self.failUnless(llmat_isEqual(R[n*i:n*(i+1),n*(i+1):n*(i+2)], Psym))
     # store and get diagonal blocks in symmetric matrix
     R = spmatrix.ll_mat_sym(n*n)
     for i in range(n):
         R[n*i:n*(i+1),n*i:n*(i+1)] = Psym
         self.failUnless(llmat_isEqual(R[n*i:n*(i+1),n*i:n*(i+1)], Psym))
     # store and get off-diagonal blocks in symmetric matrix
     for i in range(n-1):
         R[n*(i+1):n*(i+2),n*i:n*(i+1)] = P
         self.failUnless(llmat_isEqual(R[n*(i+1):n*(i+2),n*i:n*(i+1)], P))
         R[n*(i+1):n*(i+2),n*i:n*(i+1)] = Psym
         self.failUnless(llmat_isEqual(R[n*(i+1):n*(i+2),n*i:n*(i+1)], Psym))
Example #25
def _parsev2007file(s):
    """
    Parse a version 2007 ToPy problem definition file to a dictionary.

    """
    d = {} #  Empty dictionary that we're going to fill
    snew = []
    s = s.splitlines()
    for line in range(1, len(s)):
        if s[line] and s[line][0] != '#':
            if s[line].count('#'):
                snew.append(s[line].rsplit('#')[0:-1][0])
            else:
                snew.append(s[line])
    # Check for <TAB>s; if found print lines and exit:
    _checkfortabs(snew)
    # Create dictionary containing all lines of input file:
    for i in snew:
        pair = i.split(':')
        d[pair[0].strip()] = pair[1].strip()

    # Read/convert minimum required input and convert, else exit:
    try:
        d['PROB_TYPE'] = lower(d['PROB_TYPE'])
        d['VOL_FRAC'] = float(d['VOL_FRAC'])
        d['FILT_RAD'] = float(d['FILT_RAD'])
        d['P_FAC'] = float(d['P_FAC'])
        d['NUM_ELEM_X'] = int(d['NUM_ELEM_X'])
        d['NUM_ELEM_Y'] = int(d['NUM_ELEM_Y'])
        d['NUM_ELEM_Z'] = int(d['NUM_ELEM_Z'])
        d['DOF_PN'] = int(d['DOF_PN'])
        d['ELEM_TYPE'] = d['ELEM_K']
        d['ELEM_K'] = eval(d['ELEM_TYPE'])
        try:
            d['ETA'] = float(d['ETA'])
        except ValueError:
            d['ETA'] = lower(d['ETA'])
    except:
        raise ToPyError(MSG2)

    # Check for number of iterations or change stop value:
    try:
        d['NUM_ITER'] = int(d['NUM_ITER'])
    except KeyError:
        try:
            d['CHG_STOP'] = float(d['CHG_STOP'])
        except KeyError:
            raise ToPyError(MSG2)
    except KeyError:
        raise ToPyError(MSG2)

    # Check for GSF penalty factor:
    try:
        d['Q_FAC'] = float(d['Q_FAC'])
    except KeyError:
        pass

    # Check for continuation parameters:
    try:
        d['P_MAX'] = float(d['P_MAX'])
        d['P_HOLD'] = int(d['P_HOLD'])
        d['P_INCR'] = float(d['P_INCR'])
        d['P_CON'] = float(d['P_CON'])
    except KeyError:
        pass

    try:
        d['Q_MAX'] = float(d['Q_MAX'])
        d['Q_HOLD'] = int(d['Q_HOLD'])
        d['Q_INCR'] = float(d['Q_INCR'])
        d['Q_CON'] = float(d['Q_CON'])
    except KeyError:
        pass

    # Check for active elements:
    try:
        d['ACTV_ELEM'] = _tpd2vec(d['ACTV_ELEM']) - 1
    except KeyError:
        d['ACTV_ELEM'] = _tpd2vec('')

    # Check for passive elements:
    try:
        d['PASV_ELEM'] = _tpd2vec(d['PASV_ELEM']) - 1
    except KeyError:
        d['PASV_ELEM'] = _tpd2vec('')

    # Check if diagonal quadratic approximation is required:
    try:
        d['APPROX'] = lower(d['APPROX'])
    except KeyError:
        pass

    # How to do the following compactly (perhaps loop through keys)? Check for
    # keys and create fixed DOF vector, loaded DOF vector and load values
    # vector.
    dofpn = d['DOF_PN']

    x = y = z = ''
    if d.has_key('FXTR_NODE_X'):
        x = d['FXTR_NODE_X']
    if d.has_key('FXTR_NODE_Y'):
        y = d['FXTR_NODE_Y']
    if d.has_key('FXTR_NODE_Z'):
        z = d['FXTR_NODE_Z']
    d['FIX_DOF'] = _dofvec(x, y, z, dofpn)

    x = y = z = ''
    if d.has_key('LOAD_NODE_X'):
        x = d['LOAD_NODE_X']
    if d.has_key('LOAD_NODE_Y'):
        y = d['LOAD_NODE_Y']
    if d.has_key('LOAD_NODE_Z'):
        z = d['LOAD_NODE_Z']
    d['LOAD_DOF'] = _dofvec(x, y, z, dofpn)

    x = y = z = ''
    if d.has_key('LOAD_VALU_X'):
        x = d['LOAD_VALU_X']
    if d.has_key('LOAD_VALU_Y'):
        y = d['LOAD_VALU_Y']
    if d.has_key('LOAD_VALU_Z'):
        z = d['LOAD_VALU_Z']
    d['LOAD_VAL'] = _valvec(x, y, z)

    # Compliant mechanism synthesis values and vectors:
    x = y = z = ''
    if d.has_key('LOAD_NODE_X_OUT'):
        x = d['LOAD_NODE_X_OUT']
    if d.has_key('LOAD_NODE_Y_OUT'):
        y = d['LOAD_NODE_Y_OUT']
    if d.has_key('LOAD_NODE_Z_OUT'):
        z = d['LOAD_NODE_Z_OUT']
    d['LOAD_DOF_OUT'] = _dofvec(x, y, z, dofpn)

    x = y = z = ''
    if d.has_key('LOAD_VALU_X_OUT'):
        x = d['LOAD_VALU_X_OUT']
    if d.has_key('LOAD_VALU_Y_OUT'):
        y = d['LOAD_VALU_Y_OUT']
    if d.has_key('LOAD_VALU_Z_OUT'):
        z = d['LOAD_VALU_Z_OUT']
    d['LOAD_VAL_OUT'] = _valvec(x, y, z)

    # The following entries are created and added to the dictionary,
    # they are not specified in the ToPy problem definition file:
    Ksize = d['DOF_PN'] * (d['NUM_ELEM_X'] + 1) * (d['NUM_ELEM_Y']\
    + 1) * (d['NUM_ELEM_Z'] + 1) #  Memory allocation hint for PySparse
    d['K'] = spmatrix.ll_mat_sym(Ksize, Ksize) #  Global stiffness matrix
    d['E2SDOFMAPI'] =  _e2sdofmapinit(d['NUM_ELEM_X'], d['NUM_ELEM_Y'], \
    d['DOF_PN']) #  Initial element to structure DOF mapping

    return d
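To make the memory-allocation hint concrete (illustrative numbers, not from the original file): a 2D problem with DOF_PN = 2, NUM_ELEM_X = 60, NUM_ELEM_Y = 20 and NUM_ELEM_Z = 0 gives Ksize = 2 * 61 * 21 * 1 = 2562, so d['K'] is created as a 2562 x 2562 symmetric ll_mat before any element stiffness contributions are assembled into it.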
Example #26
    u = zeros((n, ), 'd')
    t = zeros((n, ), 'd')
    for k in xrange(kconv):
        u = Q[:,k].copy()
        A.matvec(u, r)
        if M is not None:
            M.matvec(u, t)
        else:
            t = u
        r = r - lmbd[k]*t
        residuals[k] = sqrt(dot(r,r))
    return residuals
    
n = 1000; ncv = 5; tol = 1e-6

A = spmatrix.ll_mat_sym(n)
for i in xrange(n):
    A[i,i] = i+1.0
As = A.to_sss()

M = spmatrix.ll_mat_sym(n)
for i in xrange(n):
    M[i,i] = float(n/2) + i
Ms = M.to_sss()
normM = M[n-1,n-1]

K = diagPrecShifted(A, M, 0.006)

#-------------------------------------------------------------------------------
# Test 1: M = K = None
Example #27
def _parsev2007file(s):
    """
    Parse a version 2007 ToPy problem definition file to a dictionary.

    """
    snew = s.splitlines()[1:]
    snew = [line.split('#')[0] for line in snew] # Get rid of all comments
    snew = [line.replace('\t', '') for line in snew]
    snew = [line.replace(' ', '') for line in snew]
    snew = filter(len, snew)

    d = dict([line.split(':') for line in snew]) 


    # Read/convert minimum required input and convert, else exit:

    try:
        d['PROB_TYPE'] = lower(d['PROB_TYPE'])
        d['VOL_FRAC'] = float(d['VOL_FRAC'])
        d['FILT_RAD'] = float(d['FILT_RAD'])
        d['P_FAC'] = float(d['P_FAC'])
        d['NUM_ELEM_X'] = int(d['NUM_ELEM_X'])
        d['NUM_ELEM_Y'] = int(d['NUM_ELEM_Y'])
        d['NUM_ELEM_Z'] = int(d['NUM_ELEM_Z'])
        d['DOF_PN'] = int(d['DOF_PN'])
        d['ELEM_TYPE'] = d['ELEM_K']
        d['ELEM_K'] = eval(d['ELEM_TYPE'])
        d['ETA'] = lower(d['ETA'])
    except:
        raise ValueError('One or more parameters incorrectly specified.')

    # Check for number of iterations or change stop value:
    try:
        d['NUM_ITER'] = int(d['NUM_ITER'])
    except KeyError:
        try:
            d['CHG_STOP'] = float(d['CHG_STOP'])
        except KeyError:
            raise ValueError("Neither NUM_ITER nor CHG_STOP was declared")

    # Check for GSF penalty factor:
    try:
        d['Q_FAC'] = float(d['Q_FAC'])
    except KeyError:
        pass

    # Check for continuation parameters:
    try:
        d['P_MAX'] = float(d['P_MAX'])
        d['P_HOLD'] = int(d['P_HOLD'])
        d['P_INCR'] = float(d['P_INCR'])
        d['P_CON'] = float(d['P_CON'])
    except KeyError:
        pass

    try:
        d['Q_MAX'] = float(d['Q_MAX'])
        d['Q_HOLD'] = int(d['Q_HOLD'])
        d['Q_INCR'] = float(d['Q_INCR'])
        d['Q_CON'] = float(d['Q_CON'])
    except KeyError:
        pass

    # Check for active elements:
    try:
        d['ACTV_ELEM'] = _tpd2vec(d['ACTV_ELEM']) - 1
    except KeyError:
        d['ACTV_ELEM'] = _tpd2vec('')

    # Check for passive elements:
    try:
        d['PASV_ELEM'] = _tpd2vec(d['PASV_ELEM']) - 1
    except KeyError:
        d['PASV_ELEM'] = _tpd2vec('')

    # Check if diagonal quadratic approximation is required:
    try:
        d['APPROX'] = lower(d['APPROX'])
    except KeyError:
        pass

    # How to do the following compactly (perhaps loop through keys)? Check for
    # keys and create fixed DOF vector, loaded DOF vector and load values
    # vector.
    dofpn = d['DOF_PN']

    x = d.get('FXTR_NODE_X', '')
    y = d.get('FXTR_NODE_Y', '')
    z = d.get('FXTR_NODE_Z', '')
    d['FIX_DOF'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_NODE_X', '')
    y = d.get('LOAD_NODE_Y', '')
    z = d.get('LOAD_NODE_Z', '')
    d['LOAD_DOF'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_VALU_X', '')
    y = d.get('LOAD_VALU_Y', '')
    z = d.get('LOAD_VALU_Z', '')
    d['LOAD_VAL'] = _valvec(x, y, z)

    x = d.get('LOAD_NODE_X_OUT', '')
    y = d.get('LOAD_NODE_Y_OUT', '')
    z = d.get('LOAD_NODE_Z_OUT', '')
    d['LOAD_DOF_OUT'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_VALU_X_OUT', '')
    y = d.get('LOAD_VALU_Y_OUT', '')
    z = d.get('LOAD_VALU_Z_OUT', '')
    d['LOAD_VAL_OUT'] = _valvec(x, y, z)

    # The following entries are created and added to the dictionary,
    # they are not specified in the ToPy problem definition file:
    Ksize = d['DOF_PN'] * (d['NUM_ELEM_X'] + 1) * (d['NUM_ELEM_Y'] + 1) * \
    (d['NUM_ELEM_Z'] + 1) #  Memory allocation hint for PySparse
    d['K'] = spmatrix.ll_mat_sym(Ksize, Ksize) #  Global stiffness matrix
    d['E2SDOFMAPI'] =  _e2sdofmapinit(d['NUM_ELEM_X'], d['NUM_ELEM_Y'], \
    d['DOF_PN']) #  Initial element to structure DOF mapping

    return d
Example #28
def _parsev2007file(s):
    """
    Parse a version 2007 ToPy problem definition file to a dictionary.

    """
    d = {}  #  Empty dictionary that we're going to fill
    snew = []
    s = s.splitlines()
    for line in range(1, len(s)):
        if s[line] and s[line][0] != '#':
            if s[line].count('#'):
                snew.append(s[line].rsplit('#')[0:-1][0])
            else:
                snew.append(s[line])
    # Check for <TAB>s; if found print lines and exit:
    _checkfortabs(snew)
    # Create dictionary containing all lines of input file:
    for i in snew:
        pair = i.split(':')
        d[pair[0].strip()] = pair[1].strip()

    # Read/convert minimum required input and convert, else exit:
    try:
        d['PROB_TYPE'] = lower(d['PROB_TYPE'])
        d['VOL_FRAC'] = float(d['VOL_FRAC'])
        d['FILT_RAD'] = float(d['FILT_RAD'])
        d['P_FAC'] = float(d['P_FAC'])
        d['NUM_ELEM_X'] = int(d['NUM_ELEM_X'])
        d['NUM_ELEM_Y'] = int(d['NUM_ELEM_Y'])
        d['NUM_ELEM_Z'] = int(d['NUM_ELEM_Z'])
        d['DOF_PN'] = int(d['DOF_PN'])
        d['ELEM_TYPE'] = d['ELEM_K']
        d['ELEM_K'] = eval(d['ELEM_TYPE'])
        try:
            d['ETA'] = float(d['ETA'])
        except ValueError:
            d['ETA'] = lower(d['ETA'])
    except:
        raise ToPyError(MSG2)

    # Check for number of iterations or change stop value:
    try:
        d['NUM_ITER'] = int(d['NUM_ITER'])
    except KeyError:
        try:
            d['CHG_STOP'] = float(d['CHG_STOP'])
        except KeyError:
            raise ToPyError(MSG2)
    except KeyError:
        raise ToPyError(MSG2)

    # Check for GSF penalty factor:
    try:
        d['Q_FAC'] = float(d['Q_FAC'])
    except KeyError:
        pass

    # Check for continuation parameters:
    try:
        d['P_MAX'] = float(d['P_MAX'])
        d['P_HOLD'] = int(d['P_HOLD'])
        d['P_INCR'] = float(d['P_INCR'])
        d['P_CON'] = float(d['P_CON'])
    except KeyError:
        pass

    try:
        d['Q_MAX'] = float(d['Q_MAX'])
        d['Q_HOLD'] = int(d['Q_HOLD'])
        d['Q_INCR'] = float(d['Q_INCR'])
        d['Q_CON'] = float(d['Q_CON'])
    except KeyError:
        pass

    # Check for active elements:
    try:
        d['ACTV_ELEM'] = _tpd2vec(d['ACTV_ELEM']) - 1
    except KeyError:
        d['ACTV_ELEM'] = _tpd2vec('')

    # Check for passive elements:
    try:
        d['PASV_ELEM'] = _tpd2vec(d['PASV_ELEM']) - 1
    except KeyError:
        d['PASV_ELEM'] = _tpd2vec('')

    # Check if diagonal quadratic approximation is required:
    try:
        d['APPROX'] = lower(d['APPROX'])
    except KeyError:
        pass

    # How to do the following compactly (perhaps loop through keys)? Check for
    # keys and create fixed DOF vector, loaded DOF vector and load values
    # vector.
    dofpn = d['DOF_PN']

    x = y = z = ''
    if d.has_key('FXTR_NODE_X'):
        x = d['FXTR_NODE_X']
    if d.has_key('FXTR_NODE_Y'):
        y = d['FXTR_NODE_Y']
    if d.has_key('FXTR_NODE_Z'):
        z = d['FXTR_NODE_Z']
    d['FIX_DOF'] = _dofvec(x, y, z, dofpn)

    x = y = z = ''
    if d.has_key('LOAD_NODE_X'):
        x = d['LOAD_NODE_X']
    if d.has_key('LOAD_NODE_Y'):
        y = d['LOAD_NODE_Y']
    if d.has_key('LOAD_NODE_Z'):
        z = d['LOAD_NODE_Z']
    d['LOAD_DOF'] = _dofvec(x, y, z, dofpn)

    x = y = z = ''
    if d.has_key('LOAD_VALU_X'):
        x = d['LOAD_VALU_X']
    if d.has_key('LOAD_VALU_Y'):
        y = d['LOAD_VALU_Y']
    if d.has_key('LOAD_VALU_Z'):
        z = d['LOAD_VALU_Z']
    d['LOAD_VAL'] = _valvec(x, y, z)

    # Compliant mechanism synthesis values and vectors:
    x = y = z = ''
    if d.has_key('LOAD_NODE_X_OUT'):
        x = d['LOAD_NODE_X_OUT']
    if d.has_key('LOAD_NODE_Y_OUT'):
        y = d['LOAD_NODE_Y_OUT']
    if d.has_key('LOAD_NODE_Z_OUT'):
        z = d['LOAD_NODE_Z_OUT']
    d['LOAD_DOF_OUT'] = _dofvec(x, y, z, dofpn)

    x = y = z = ''
    if d.has_key('LOAD_VALU_X_OUT'):
        x = d['LOAD_VALU_X_OUT']
    if d.has_key('LOAD_VALU_Y_OUT'):
        y = d['LOAD_VALU_Y_OUT']
    if d.has_key('LOAD_VALU_Z_OUT'):
        z = d['LOAD_VALU_Z_OUT']
    d['LOAD_VAL_OUT'] = _valvec(x, y, z)

    # The following entries are created and added to the dictionary,
    # they are not specified in the ToPy problem definition file:
    Ksize = d['DOF_PN'] * (d['NUM_ELEM_X'] + 1) * (d['NUM_ELEM_Y']\
    + 1) * (d['NUM_ELEM_Z'] + 1) #  Memory allocation hint for PySparse
    d['K'] = spmatrix.ll_mat_sym(Ksize, Ksize)  #  Global stiffness matrix
    d['E2SDOFMAPI'] =  _e2sdofmapinit(d['NUM_ELEM_X'], d['NUM_ELEM_Y'], \
    d['DOF_PN']) #  Initial element to structure DOF mapping

    return d
Example #29
def analyze(vxg, loads, boundary, iter):
    """
    main analysis function
       - vxg: voxel grid (3d list)
       - loads: each consisting of
           * points [point set #1, point set #2 ...]
           * value [value #1, value #2, ...]
       - boundary
           * points
       - iter: whether to use iterative or direct solver
        (points are element numbers)
    output:
       - displacement vector
       - von Mises stress vector
    """
    global Ke, B, C

    nz = len(vxg)
    ny = len(vxg[0])
    nx = len(vxg[0][0])
    _log('voxelization')
    print('voxel grid: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz))

    # compute stiffness matrix for individual elements
    DOF = 3
    ksize = DOF * (nx + 1) * (ny + 1) * (nz + 1)
    kall = spmatrix.ll_mat_sym(ksize, ksize)

    SOLID = 1.000
    VOID = 0.001
    for i in range(0, nz):
        for j in range(0, ny):
            for k in range(0, nx):
                xe = SOLID if vxg[i][j][k] == 1 else VOID
                nodes = _node_nums_3d(nx, ny, nz, k + 1, j + 1, i + 1)
                ind = []
                for n in nodes:
                    ind.extend([(n - 1) * DOF, (n - 1) * DOF + 1,
                                (n - 1) * DOF + 2])
                mask = np.ones(len(ind), dtype=int)
                kall.update_add_mask_sym(Ke * xe, ind, mask)

    _log('updated stiffness matrix for all elements')

    # formulate loading scenario
    rall = [0] * ksize
    indicesset = loads['points']
    values = loads['values']

    for i in range(0, len(indicesset)):
        indices = indicesset[i]
        value = values[i]
        for idx in indices:
            nodes = _node_nums_3d(nx, ny, nz, idx[0] + 1, idx[1] + 1,
                                  idx[2] + 1)
            for j in range(0, DOF):
                for k in range(0, len(nodes)):
                    rall[DOF * (nodes[k] - 1) + j] = value[j]

    # formulate boundary condition
    elemmask = [1] * (nx + 1) * (ny + 1) * (nz + 1)
    for idx in boundary:
        nodes = _node_nums_3d(nx, ny, nz, idx[0] + 1, idx[1] + 1, idx[2] + 1)
        for j in range(0, len(nodes)):
            elemmask[nodes[j] - 1] = 0

    freedofs = []
    fixeddofs = []
    for i in range(0, len(elemmask)):
        if elemmask[i] == 1:
            freedofs.extend((DOF * i, DOF * i + 1, DOF * i + 2))
        else:
            fixeddofs.extend((DOF * i, DOF * i + 1, DOF * i + 2))

    _log('formulated loading scenario and boundary condition')

    # solve KU=F
    rfree = np.take(rall, freedofs)
    dfree = np.empty(len(freedofs))

    alldofs = np.arange(ksize)
    rcfixed = np.where(np.in1d(alldofs, fixeddofs), 0, 1)
    kfree = kall
    kfree.delete_rowcols(rcfixed)

    _log('removed constrained elements')

    if iter:
        kfree = kfree.to_sss()
        prek = precon.ssor(kfree)
        (info, numitr, relerr) = itsolvers.pcg(kfree, rfree, dfree, 1e-8, 8000,
                                               prek)
        if info >= 0:
            print('converged after ' + str(numitr) +
                  ' iterations with error of ' + str(relerr))
        else:
            print('PySparse error: Type: ' + str(info) + ', at ' +
                  str(numitr) + ' iterations.')
    else:
        kfree = kfree.to_csr()
        lu = superlu.factorize(kfree)
        lu.solve(rfree, dfree)

    _log('solved KU=F')

    dall = np.zeros_like(rall)
    for i in range(0, len(freedofs)):
        dall[freedofs[i]] = dfree[i]

    # compute stress
    cb = C * B
    vonmises = []
    for i in range(0, nz):
        vmplane = []
        for j in range(0, ny):
            vmrow = []
            for k in range(0, nx):
                nodes = _node_nums_3d(nx, ny, nz, k + 1, j + 1, i + 1)
                disps = []
                for n in nodes:
                    disps.extend([
                        dall[DOF * (n - 1)], dall[DOF * (n - 1) + 1],
                        dall[DOF * (n - 1) + 2]
                    ])
                d = np.matrix(disps).transpose()
                sigma = cb * d

                s11 = sigma.item(0, 0)
                s22 = sigma.item(1, 0)
                s33 = sigma.item(2, 0)
                s12 = sigma.item(3, 0) * 0.5  # DOUBLE CHECK THIS
                s23 = sigma.item(4, 0) * 0.5
                s31 = sigma.item(5, 0) * 0.5

                # von Mises stress, cf. Stava et al.'s Stress Relief paper (SIGGRAPH '12)
                vmrow.append(
                    sqrt(0.5 *
                         ((s11 - s22)**2 + (s22 - s33)**2 +
                          (s33 - s11)**2 + 6 * (s12**2 + s23**2 + s31**2))))
            vmplane.append(vmrow)
        vonmises.append(vmplane)

    t1 = _log('computed stress')

    global t0
    print('total time:' + str(t1 - t0) + ' ms')

    return {'displacements': dall.tolist(), 'stress': vonmises}
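A hedged call sketch for analyze (illustrative inputs; it assumes the module-level Ke, B, C, t0 and the helpers _node_nums_3d and _log used above are defined):

# 10 x 4 x 4 voxel grid, fully solid
vxg = [[[1]*10 for _ in range(4)] for _ in range(4)]
# pull one element on the far x face downward in y
loads = {'points': [[(9, 1, 1)]], 'values': [[0.0, -1.0, 0.0]]}
# clamp the whole x = 0 face
boundary = [(0, j, k) for j in range(4) for k in range(4)]
result = analyze(vxg, loads, boundary, True)   # True selects the iterative (PCG) solver
print len(result['displacements']), len(result['stress'])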
Example #30
import numpy as np
from nlpy.tools.timing import cputime
import sys

if len(sys.argv) < 3:
    sys.stderr.write('Please supply two positive definite matrices as input')
    sys.stderr.write(' in MatrixMarket format.\n')
    sys.exit(1)

# Create symmetric quasi-definite matrix K
A = spmatrix.ll_mat_from_mtx(sys.argv[1])
C = spmatrix.ll_mat_from_mtx(sys.argv[2])

nA = A.shape[0]
nC = C.shape[0]
K = spmatrix.ll_mat_sym(nA + nC, A.nnz + C.nnz + min(nA, nC))
K[:nA, :nA] = A
K[nA:, nA:] = C
K[nA:, nA:].scale(-1.0)
idx = np.arange(min(nA, nC), dtype=np.int)
K.put(1, nA + idx, idx)

# Create right-hand side rhs=K*e
e = np.ones(nA + nC)
rhs = np.empty(nA + nC)
K.matvec(e, rhs)

# Factorize and solve Kx = rhs, knowing K is sqd
t = cputime()
P = LBLContext(K, sqd=True)
t = cputime() - t
Example #31
def speye(n):
    A = spmatrix.ll_mat_sym(n, n)
    for i in xrange(n):
        A[i,i] = 1.0
    return A
Example #32
def _parse_dict(d):
# Read and convert the parameters:
    d = d.copy()
    try:
        d['PROB_TYPE'] = d['PROB_TYPE'].lower()
        d['VOL_FRAC'] = float(d['VOL_FRAC'])
        d['FILT_RAD'] = float(d['FILT_RAD'])
        d['P_FAC'] = float(d['P_FAC'])
        d['NUM_ELEM_X'] = int(d['NUM_ELEM_X'])
        d['NUM_ELEM_Y'] = int(d['NUM_ELEM_Y'])
        d['NUM_ELEM_Z'] = int(d['NUM_ELEM_Z'])
        d['DOF_PN'] = int(d['DOF_PN'])
        d['ETA'] = str(d['ETA']).lower()
        d['ELEM_TYPE'] = d['ELEM_K']
        d['ELEM_K'] = eval(d['ELEM_TYPE'])
    except:
        raise ValueError('One or more parameters incorrectly specified.')

# Check for number of iterations or change stop value:
    try:
        d['NUM_ITER'] = int(d['NUM_ITER'])
    except KeyError:
        try:
            d['CHG_STOP'] = float(d['CHG_STOP'])
        except KeyError:
            raise ValueError("Neither NUM_ITER nor CHG_STOP was declared")

# Check for GSF penalty factor:
    try:
        d['Q_FAC'] = float(d['Q_FAC'])
    except KeyError:
        pass

# Check for continuation parameters:
    try:
        d['P_MAX'] = float(d['P_MAX'])
        d['P_HOLD'] = int(d['P_HOLD'])
        d['P_INCR'] = float(d['P_INCR'])
        d['P_CON'] = float(d['P_CON'])
    except KeyError:
        pass

    try:
        d['Q_MAX'] = float(d['Q_MAX'])
        d['Q_HOLD'] = int(d['Q_HOLD'])
        d['Q_INCR'] = float(d['Q_INCR'])
        d['Q_CON'] = float(d['Q_CON'])
    except KeyError:
        pass

# Check for active and passive elements:
    try:
        d['ACTV_ELEM'] = _tpd2vec(d['ACTV_ELEM']) - 1
    except KeyError:
        d['ACTV_ELEM'] = _tpd2vec('')
    except AttributeError:
        pass

    try:
        d['PASV_ELEM'] = _tpd2vec(d['PASV_ELEM']) - 1
    except KeyError:
        d['PASV_ELEM'] = _tpd2vec('')
    except AttributeError:
        pass

# Check if diagonal quadratic approximation is required:
    try:
        d['APPROX'] = d['APPROX'].lower()
    except KeyError:
        pass

# How to do the following compactly (perhaps loop through keys)? Check for
# keys and create fixed DOF vector, loaded DOF vector and load values
# vector.
    dofpn = d['DOF_PN']

    x = d.get('FXTR_NODE_X', '')
    y = d.get('FXTR_NODE_Y', '')
    z = d.get('FXTR_NODE_Z', '')
    d['FIX_DOF'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_NODE_X', '')
    y = d.get('LOAD_NODE_Y', '')
    z = d.get('LOAD_NODE_Z', '')
    d['LOAD_DOF'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_VALU_X', '')
    y = d.get('LOAD_VALU_Y', '')
    z = d.get('LOAD_VALU_Z', '')
    d['LOAD_VAL'] = _valvec(x, y, z)

    x = d.get('LOAD_NODE_X_OUT', '')
    y = d.get('LOAD_NODE_Y_OUT', '')
    z = d.get('LOAD_NODE_Z_OUT', '')
    d['LOAD_DOF_OUT'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_VALU_X_OUT', '')
    y = d.get('LOAD_VALU_Y_OUT', '')
    z = d.get('LOAD_VALU_Z_OUT', '')
    d['LOAD_VAL_OUT'] = _valvec(x, y, z)


    # The following entries are created and added to the dictionary,
    # they are not specified in the ToPy problem definition file:
    Ksize = d['DOF_PN'] * (d['NUM_ELEM_X'] + 1) * (d['NUM_ELEM_Y'] + 1) * \
    (d['NUM_ELEM_Z'] + 1) # Memory allocation hint for PySparse
    d['K'] = spmatrix.ll_mat_sym(Ksize, Ksize) # Global stiffness matrix
    d['E2SDOFMAPI'] =  _e2sdofmapinit(d['NUM_ELEM_X'], d['NUM_ELEM_Y'], \
    d['DOF_PN']) # Initial element to structure DOF mapping

    return d
Example #33
 def setUp(self):
     self.n = 10
     self.A = spmatrix.ll_mat(self.n, self.n)
     self.S = spmatrix.ll_mat_sym(self.n)
Example #34
 def __init__(self, n, edges=[]):
     self.W = spmatrix.ll_mat_sym(n)
     self.n = n
     self.file_tmp = 'matrix_out'
     for edge in edges:
         self.add_edge(edge[0], edge[1], edge[2])
Example #35
import numpy as Numeric
import traceback
from pysparse import spmatrix_util, spmatrix


def printMatrix(M):
    n, m = M.shape
    Z = Numeric.zeros((n, m), 'd')
    for i in range(n):
        for j in range(m):
            Z[i, j] = M[i, j]
    print str(Z) + '\n'


n = 10
A = spmatrix.ll_mat(n, n)
As = spmatrix.ll_mat_sym(n)
Is = spmatrix.ll_mat_sym(n)
I = spmatrix.ll_mat(n, n)
Os = spmatrix.ll_mat_sym(n)
O = spmatrix.ll_mat(n, n)

for i in range(n):
    for j in range(n):
        if i >= j:
            A[i, j] = 10 * i + j
        else:
            A[i, j] = 10 * j + i
        O[i, j] = 1

for i in range(n):
    for j in range(n):
Example #36
# winak
# This file is Copyright Daniel Strobusch
#

from pysparse import spmatrix
from winak.curvilinear.numeric import *
LLMatType = type(spmatrix.ll_mat_sym(1, 1))
SSSMatType = type(spmatrix.ll_mat_sym(1, 1).to_sss())
CSRMatType = type(spmatrix.ll_mat_sym(1, 1).to_csr())


class spmatrixIterator:
    def __init__(self, matrix):
        self.matrix = matrix
        if type(self.matrix) == SSSMatType:
            self.diag, self.val, self.col, self.ind = matrix.matrices()
            self.n = len(self.diag)
            self.nod = len(self.val)
            self.nnz = self.n + self.nod
            self.__iter__ = self.iterSSS
        elif type(self.matrix) == LLMatType:
            self.root, self.link, self.col, self.val = matrix.matrices()
            self.n = len(self.root)
            self.nnz = len(self.val)
            self.__iter__ = self.iterLL
        elif type(self.matrix) == CSRMatType:
            self.val, self.col, self.ind = matrix.matrices()
            self.nnz = len(self.val)
            self.__iter__ = self.iterCSR

    def iterLL(self):
Example #37
        for p in primes[:nof]:
            if i % p == 0 or p * p > i: break
        if i % p != 0:
            primes[nof] = i
            nof += 1
            if nof >= nofPrimes:
                break
        i = i + 2
    return primes


n = 20000

primes = get_primes(n)

A = spmatrix.ll_mat_sym(n, n * 8)
d = 1
while d < n:
    for i in range(d, n):
        A[i, i - d] = 1.0
    d *= 2
for i in range(n):
    A[i, i] = primes[i]

A = A.to_sss()
K = precon.ssor(A)

b = Numeric.zeros(n, 'd')
b[0] = 1.0
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-16, n, K)
Example #38
import numpy as Numeric
import traceback
from pysparse import spmatrix_util, spmatrix

def printMatrix(M):
    n, m = M.shape
    Z = Numeric.zeros((n,m), 'd')
    for i in range(n):
        for j in range(m):
            Z[i,j] = M[i,j]
    print str(Z) + '\n'
    
n = 10
A = spmatrix.ll_mat(n,n)
As = spmatrix.ll_mat_sym(n)
Is = spmatrix.ll_mat_sym(n)
I = spmatrix.ll_mat(n,n)
Os = spmatrix.ll_mat_sym(n)
O = spmatrix.ll_mat(n,n)

for i in range(n):
    for j in range(n):
        if i >= j:
            A[i,j] = 10*i + j
        else:
            A[i,j] = 10*j + i
        O[i,j] = 1
            
for i in range(n):
    for j in range(n):
        if i >= j:
Example #39
    while 1:
        for p in primes[:nof]:
            if i%p == 0 or p*p > i: break
        if i%p != 0:
            primes[nof] = i
            nof += 1
            if nof >= nofPrimes:
                break
        i = i+2
    return primes

n = 20000

primes = get_primes(n)

A = spmatrix.ll_mat_sym(n, n*8)
d = 1
while d < n:
    for i in range(d, n):
        A[i,i-d] = 1.0
    d *= 2
for i in range(n):
    A[i,i] = primes[i]

A = A.to_sss()
K = precon.ssor(A)

b = Numeric.zeros(n, 'd'); b[0] = 1.0
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-16, n, K)
Example #40
def _parse_dict(d):
    # Read/convert minimum required input and convert, else exit:
    d = d.copy()
    try:
        d['PROB_TYPE'] = lower(d['PROB_TYPE'])
        d['VOL_FRAC'] = float(d['VOL_FRAC'])
        d['FILT_RAD'] = float(d['FILT_RAD'])
        d['P_FAC'] = float(d['P_FAC'])
        d['NUM_ELEM_X'] = int(d['NUM_ELEM_X'])
        d['NUM_ELEM_Y'] = int(d['NUM_ELEM_Y'])
        d['NUM_ELEM_Z'] = int(d['NUM_ELEM_Z'])
        d['DOF_PN'] = int(d['DOF_PN'])
        d['ETA'] = lower(str(d['ETA']))
        d['ELEM_TYPE'] = d['ELEM_K']
        d['ELEM_K'] = eval(d['ELEM_TYPE'])
    except:
        raise ValueError('One or more parameters incorrectly specified.')

    # Check for number of iterations or change stop value:
    try:
        d['NUM_ITER'] = int(d['NUM_ITER'])
    except KeyError:
        try:
            d['CHG_STOP'] = float(d['CHG_STOP'])
        except KeyError:
            raise ValueError("Neither NUM_ITER nor CHG_STOP was declared")

    # Check for GSF penalty factor:
    try:
        d['Q_FAC'] = float(d['Q_FAC'])
    except KeyError:
        pass

    # Check for continuation parameters:
    try:
        d['P_MAX'] = float(d['P_MAX'])
        d['P_HOLD'] = int(d['P_HOLD'])
        d['P_INCR'] = float(d['P_INCR'])
        d['P_CON'] = float(d['P_CON'])
    except KeyError:
        pass

    try:
        d['Q_MAX'] = float(d['Q_MAX'])
        d['Q_HOLD'] = int(d['Q_HOLD'])
        d['Q_INCR'] = float(d['Q_INCR'])
        d['Q_CON'] = float(d['Q_CON'])
    except KeyError:
        pass

    # Check for active elements:
    try:
        d['ACTV_ELEM'] = _tpd2vec(d['ACTV_ELEM']) - 1
    except KeyError:
        d['ACTV_ELEM'] = _tpd2vec('')
    except AttributeError:
        pass

    # Check for passive elements:
    try:
        d['PASV_ELEM'] = _tpd2vec(d['PASV_ELEM']) - 1
    except KeyError:
        d['PASV_ELEM'] = _tpd2vec('')
    except AttributeError:
        pass

    # Check if diagonal quadratic approximation is required:
    try:
        d['APPROX'] = lower(d['APPROX'])
    except KeyError:
        pass

    # How to do the following compactly (perhaps loop through keys)? Check for
    # keys and create fixed DOF vector, loaded DOF vector and load values
    # vector.
    dofpn = d['DOF_PN']

    x = d.get('FXTR_NODE_X', '')
    y = d.get('FXTR_NODE_Y', '')
    z = d.get('FXTR_NODE_Z', '')
    d['FIX_DOF'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_NODE_X', '')
    y = d.get('LOAD_NODE_Y', '')
    z = d.get('LOAD_NODE_Z', '')
    d['LOAD_DOF'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_VALU_X', '')
    y = d.get('LOAD_VALU_Y', '')
    z = d.get('LOAD_VALU_Z', '')
    d['LOAD_VAL'] = _valvec(x, y, z)

    x = d.get('LOAD_NODE_X_OUT', '')
    y = d.get('LOAD_NODE_Y_OUT', '')
    z = d.get('LOAD_NODE_Z_OUT', '')
    d['LOAD_DOF_OUT'] = _dofvec(x, y, z, dofpn)

    x = d.get('LOAD_VALU_X_OUT', '')
    y = d.get('LOAD_VALU_Y_OUT', '')
    z = d.get('LOAD_VALU_Z_OUT', '')
    d['LOAD_VAL_OUT'] = _valvec(x, y, z)

    # The following entries are created and added to the dictionary,
    # they are not specified in the ToPy problem definition file:
    Ksize = d['DOF_PN'] * (d['NUM_ELEM_X'] + 1) * (d['NUM_ELEM_Y'] + 1) * \
    (d['NUM_ELEM_Z'] + 1) #  Memory allocation hint for PySparse
    d['K'] = spmatrix.ll_mat_sym(Ksize, Ksize)  #  Global stiffness matrix
    d['E2SDOFMAPI'] =  _e2sdofmapinit(d['NUM_ELEM_X'], d['NUM_ELEM_Y'], \
    d['DOF_PN']) #  Initial element to structure DOF mapping

    return d
Example #41
import numpy as np
from nlpy.tools.timing import cputime
import sys

if len(sys.argv) < 3:
    sys.stderr.write('Please supply two positive definite matrices as input')
    sys.stderr.write(' in MatrixMarket format.\n')
    sys.exit(1)

# Create symmetric quasi-definite matrix K
A = spmatrix.ll_mat_from_mtx(sys.argv[1])
C = spmatrix.ll_mat_from_mtx(sys.argv[2])

nA = A.shape[0]
nC = C.shape[0]
K = spmatrix.ll_mat_sym(nA + nC, A.nnz + C.nnz + min(nA,nC))
K[:nA,:nA] = A
K[nA:,nA:] = C
K[nA:,nA:].scale(-1.0)
idx = np.arange(min(nA,nC), dtype=np.int)
K.put(1, nA+idx, idx)

# Create right-hand side rhs=K*e
e = np.ones(nA+nC)
rhs = np.empty(nA+nC)
K.matvec(e,rhs)

# Factorize and solve Kx = rhs, knowing K is sqd
t = cputime()
P = LBLContext(K, sqd=True)
t = cputime() - t
Example #42
 def testNormSymmetric(self):
     A = spmatrix.ll_mat_sym(4)
     A[0,0] = 1; A[1,1] = 2; A[2,2] = 3; A[3,3] = 4;
     A[1,0] = 3; A[2,0] = 2; A[3,0] = 2; 
     self.failUnless(A.norm('fro') == 8)