def __mul__(self, other):
     if isinstance(other, (N.ndarray, list, tuple)):
         # This promotes 1-D vectors to row vectors
         return N.dot(self, asmatrix(other))
     if isscalar(other) or not hasattr(other, '__rmul__'):
         return N.dot(self, other)
     return NotImplemented
Example #2
    def __gradientDecent(self,
                         datamat,
                         para=zeros((1, 2)),
                         learningRate=1,
                         iterNum=500):
        # optimization code goes here

        __size = datamat[:, 1].size
        __parameterVector = para

        print("Gradient Descent is finding optimal parameters")
        for i in range(0, iterNum):
            __t0 = __parameterVector[0]
            __t1 = __parameterVector[1]

            __t0 = __t0 - (learningRate / __size) * (
                dot(datamat[i, 0:2], __parameterVector) - datamat[i, 2])
            __t1 = __t1 - (learningRate /
                           __size) * (dot(datamat[i, 0:2], __parameterVector) -
                                      datamat[i, 2]) * datamat[i, 1]

            __parameterVector[0] = __t0
            __parameterVector[1] = __t1

        minPara = (__t0, __t1)

        print("Gradient Descent is complete")
        return minPara
Example #3
 def __mul__(self, other):
     if isinstance(other, (N.ndarray, list, tuple)) :
         # This promotes 1-D vectors to row vectors
         return N.dot(self, asmatrix(other))
     if isscalar(other) or not hasattr(other, '__rmul__') :
         return N.dot(self, other)
     return NotImplemented
Example #4
 def __rmul__(self, other):
     # ! NumPy's matrix __rmul__ uses an apparently restrictive
     # dot() function that cannot handle the multiplication of a
     # scalar and of a matrix containing objects (when the
     # arguments are given in this order).  We go around this
     # limitation:
     if numeric.isscalar(other):
         return numeric.dot(self, other)
     else:
         return numeric.dot(other, self)  # The order is important
Example #5
 def __rmul__(self, other):
     # ! NumPy's matrix __rmul__ uses an apparently restrictive
     # dot() function that cannot handle the multiplication of a
     # scalar and of a matrix containing objects (when the
     # arguments are given in this order).  We go around this
     # limitation:
     if numeric.isscalar(other):
         return numeric.dot(self, other)
     else:
         return numeric.dot(other, self)  # The order is important
Example #6
 def __mul__(self, other):
     if self.shape == (1,1):
         # extract scalars from singleton matrices (self)
         return N.dot(self.flat[0], other)
     if isinstance(other, N.ndarray) and other.shape == (1,1):
         # extract scalars from singleton matrices (other)
         return N.dot(self, other.flat[0])
     if isinstance(other, (N.ndarray, list, tuple)) :
         # This promotes 1-D vectors to row vectors
         return N.dot(self, asmatrix(other))
     if isscalar(other) or not hasattr(other, '__rmul__') :
         return N.dot(self, other)
     return NotImplemented
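
These __mul__/__rmul__ snippets appear to come from NumPy's old matrix class (an assumption); the short check below relies only on documented numpy.matrix behaviour and illustrates the promotion rules above:

import numpy as np

m = np.matrix([[1, 2], [3, 4]])
print(m * [[1], [1]])   # the nested list is promoted via asmatrix -> matrix([[3], [7]])
print(m * 2)            # scalar path: N.dot(self, other) scales every element
print(2 * m)            # handled by __rmul__ -> N.dot(other, self)
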
Example #7
def generate_nmf_test(numFactors, density):
    allUsers = User.objects.all()
    allBusinsses = Business.objects.all()
    random.seed(666)
    newP = []
    for u in range(0, allUsers.count()):
        if u not in newP:
            newP.append([])
        for k in range(0, numFactors):
            rif = random.uniform(0, 1)
            newP[u].append(rif)

    newQ = []
    for k in range(0, numFactors):
        newQ.append([])
        for j in range(0, allBusinsses.count()):
            rif = random.uniform(0, 1)
            newQ[k].append(rif)

    initR = dot(newP, newQ)

    i = 0
    for u in allUsers:
        j = 0
        for b in allBusinsses:
            chance = random.uniform(0, 1)
            if(chance < density):
                rat = Rating(business=b, username=u, rating=float(initR[i][j]))
                rat.save()
            j = j + 1
        i = i + 1
Example #8
 def _removeNonTracklikeClusterCenters(self):
   '''NOTE : Much of this code is copied from LPCMImpl.followXSingleDirection (factor out?)
   '''
   labels = self._meanShift.labels_
   labels_unique = unique(labels)
   cluster_centers = self._meanShift.cluster_centers_
   rsp = lpcRandomStartPoints()
   cluster_representatives = []
   for k in range(len(labels_unique)):
     cluster_members = labels == k
     cluster_center = cluster_centers[k]
     cluster = self._Xi[cluster_members,:]
     mean_sub = cluster - cluster_center 
     cov_x = dot(transpose(mean_sub), mean_sub) 
     eigen_cov = eigh(cov_x)
     sorted_eigen_cov = zip(eigen_cov[0],map(ravel,vsplit(eigen_cov[1].transpose(),len(eigen_cov[1]))))
     sorted_eigen_cov.sort(key = lambda elt: elt[0], reverse = True)   
     rho = sorted_eigen_cov[1][0] / sorted_eigen_cov[0][0] #Ratio of two largest eigenvalues   
     if rho < self._lpcParameters['rho_threshold']:
       cluster_representatives.append(cluster_center)
     else: #append a random element of the cluster
       random_cluster_element = rsp(cluster, 1)[0]
       cluster_representatives.append(random_cluster_element)
   
   return array(cluster_representatives)
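
A standalone sketch of the eigenvalue-ratio ('rho') test used above, on synthetic data; the 0.5 threshold and all names below are illustrative rather than taken from self._lpcParameters:

import numpy as np

rng = np.random.default_rng(0)
# An elongated 3-D cluster: large spread along the first axis.
cluster = rng.normal(size=(200, 3)) * np.array([5.0, 1.0, 0.5])
mean_sub = cluster - cluster.mean(axis=0)
cov_x = mean_sub.T @ mean_sub

w, _ = np.linalg.eigh(cov_x)      # eigenvalues in ascending order
rho = w[-2] / w[-1]               # ratio of the two largest eigenvalues
print(rho, rho < 0.5)             # small rho -> treat the cluster as track-like
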
Example #9
def vectors_comparison(
    model1, model2, stem
):  # prints the word's vectors in the two models and their cosine similarity.
    v1 = model1[stem]
    v2 = model2[stem]
    cosim = dot(unitvec(v1), unitvec(v2))
    print 'Cosine similarity between vectors: ' + str(cosim)
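
For reference, the same cosine similarity can be computed with plain NumPy; unitvec in the snippet is presumably gensim.matutils.unitvec (an assumption), and the vectors below are made up:

import numpy as np

v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([2.0, 4.0, 6.0])
cosim = np.dot(v1 / np.linalg.norm(v1), v2 / np.linalg.norm(v2))
print('Cosine similarity between vectors: ' + str(cosim))   # ~1.0 for parallel vectors
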
Example #10
 def covariance(self, x=None, bias=True):
     """        
     x: shape = (n_sample, n_feature) 
     bias: 
     return: X, X is centered x and its dimensions are m*n
             S, S is covariance matrix, and its dimensions are m*m.
     """
     x = np.array(x)
     x = x.astype(float)
     n = x.shape[0]  # number of rows of the matrix, i.e., number of nodes.
     m = x.shape[1]  # number of columns of the matrix, i.e., number of pivots.
     print("number of nodes:", n)
     print("number of pivots:", m)
     r = self.average(x, axis=0)
     print("shape of mean vector:", x.shape)
     x -= r
     x = x.T  # transpose
     X = x  # X is centered x
     S = dot(x, x.T)
     if bias is False:
         S *= 1.0 / float(n - 1)
     else:
         S *= 1.0 / float(n)
     print("dimensions of S:", S.shape)
     return X, S
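
A quick sanity check of the arithmetic above against numpy.cov (standalone, with random illustrative data). With the default bias=True the method returns S = X X.T / n over column-centered data, which should match np.cov(..., rowvar=False, bias=True):

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(6, 3))             # (n_sample, n_feature)

Xc = (data - data.mean(axis=0)).T          # centered and transposed, as in the method
S = Xc @ Xc.T / data.shape[0]              # bias=True branch: divide by n
print(np.allclose(S, np.cov(data, rowvar=False, bias=True)))   # True
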
Example #11
def cov(m, y=None, rowvar=1, bias=0):
    """Estimate the covariance matrix.

    If m is a vector, return the variance.  For matrices return the
    covariance matrix.

    If y is given it is treated as an additional (set of)
    variable(s).

    Normalization is by (N-1) where N is the number of observations
    (unbiased estimate).  If bias is 1 then normalization is by N.

    If rowvar is non-zero (default), then each row is a variable with
    observations in the columns, otherwise each column
    is a variable and the observations are in the rows.
    """

    X = array(m, ndmin=2, dtype=float)
    if X.shape[0] == 1:
        rowvar = 1
    if rowvar:
        axis = 0
        tup = (slice(None),newaxis)
    else:
        axis = 1
        tup = (newaxis, slice(None))


    if y is not None:
        y = array(y, copy=False, ndmin=2, dtype=float)
        X = concatenate((X,y),axis)

    X -= X.mean(axis=1-axis)[tup]
    if rowvar:
        N = X.shape[1]
    else:
        N = X.shape[0]

    if bias:
        fact = N*1.0
    else:
        fact = N-1.0

    if not rowvar:
        return (dot(X.T, X.conj()) / fact).squeeze()
    else:
        return (dot(X, X.T.conj()) / fact).squeeze()
Example #12
    def fromLoop(cls, loop):
        """Returns a Model representing the loop"""
        #get necessary vectors
        offset_v = [loop.r_anchor[0].__dict__[c] - loop.l_anchor[0].__dict__[c] for c in 'xyz']
        sse0_v = Model.__get_sse_vector(loop.l_anchor, loop.atoms[0])
        sse1_v = Model.__get_sse_vector(loop.r_anchor, loop.atoms[-1]) 

        sFrame = TransformFrame.createFromVectors(loop.l_anchor[0], transform.Vec.from_array(offset_v), transform.Vec.from_array(sse0_v))
        
        #Theta and phi are the angles between the SSE and anchor-anchor vector
        theta = arccos(dot(sse0_v, negative(offset_v)) / (norm(sse0_v) * norm(offset_v)))
        phi = arccos(dot(sse1_v, offset_v) / (norm(sse1_v) * norm(offset_v)))
        
        #Length of the anchor-anchor vector
        anchor_dist = norm(offset_v)
        
        return Model([loop], [Vec.from_array(sFrame.transformInto(atom)) for atom in loop.atoms], theta, phi, anchor_dist, [loop.l_type, loop.r_type], Model.__gen_seq([loop.seq]) , 1)
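
A worked check of the arccos/dot angle formula used for theta and phi above (standalone; the two vectors are arbitrary):

import numpy as np
from numpy.linalg import norm

sse_v = np.array([1.0, 0.0, 0.0])
offset_v = np.array([0.0, 1.0, 0.0])
theta = np.arccos(np.dot(sse_v, np.negative(offset_v)) / (norm(sse_v) * norm(offset_v)))
print(np.degrees(theta))   # 90.0 for perpendicular vectors
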
Example #13
 def _distancePointToLineSegment(self, a, b, p):
   '''
   Returns tuple of minimum distance to the directed line segment AB from p, and the distance along AB of the point of intersection
   '''  
   ab_mag2 = dot((b-a),(b-a))
   pa_mag2 = dot((a-p),(a-p))
   pb_mag2 = dot((b-p),(b-p))
   if pa_mag2 + ab_mag2 <= pb_mag2:
     return (sqrt(pa_mag2),0)
   elif pb_mag2 + ab_mag2 <= pa_mag2:
     return (sqrt(pb_mag2), sqrt(ab_mag2))
   else:
     c = cross((b-a),(p-a))
     if ab_mag2 == 0:
       raise ValueError, 'Division by zero magnitude line segment AB'
     dist_to_line2 = dot(c,c)/ab_mag2
     dist_to_line = sqrt(dist_to_line2)
     dist_along_segment = sqrt(pa_mag2 - dist_to_line2)
     return (dist_to_line, dist_along_segment)
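
A standalone re-derivation of the same point-to-segment geometry for one concrete case (it does not call the method itself; the points are chosen so the answer is exact):

import numpy as np

a = np.array([0.0, 0.0, 0.0])
b = np.array([4.0, 0.0, 0.0])
p = np.array([2.0, 3.0, 0.0])
ab_mag2 = np.dot(b - a, b - a)                           # 16
pa_mag2 = np.dot(a - p, a - p)                           # 13
c = np.cross(b - a, p - a)
dist_to_line = np.sqrt(np.dot(c, c) / ab_mag2)           # 3.0
dist_along = np.sqrt(pa_mag2 - np.dot(c, c) / ab_mag2)   # 2.0
print(dist_to_line, dist_along)
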
Example #14
def create_olfaction_Ttheta(positions, fder):
    '''
    positions: 2 x n array
    fder: n x n array
    '''
    require_shape((2, gt(0)), positions)
    n = positions.shape[1]
    require_shape((n, n), fder)
    
    results = ndarray(shape=(n, n))

    for i in range(n):
        J = array([ [0, -1], [1, 0]])
        Js = dot(J, positions[:, i])
        
        results[i, :] = dot(positions.transpose(), Js)

    results = results * fder # it IS element by element
    return results
Example #15
 def __gradientDecent(self,datamat,para=zeros((1,2)),learningRate=1,iterNum=500):
     # optimization code goes here 
     
     __size= datamat[:,1].size;
     __parameterVector= para;
     
     print("Gradient Descent is finding optimal parameters");
     for i in range (0,iterNum):
         __t0= __parameterVector[0];
         __t1= __parameterVector[1];
         
         __t0= __t0 - (learningRate/__size)*(dot(datamat[i,0:2],__parameterVector)- datamat[i,2]);
         __t1= __t1 - (learningRate/__size)* (dot(datamat[i,0:2],__parameterVector)- datamat[i,2])*datamat[i,1];
         
         __parameterVector[0]=__t0;
         __parameterVector[1]=__t1;
         
     minPara= (__t0,__t1);
     
     print("Gradient Descent is complete");
     return minPara;
Example #16
    def __computeCost(self, mat, para=zeros((1, 2))):
        # if para is not overridden with custom initial values, use zeros
        # cost computation code goes here

        __cost = 0.0
        __size = mat[:, 1].size

        for i in range(__size):  # iterate over row indices
            __cost = __cost + (dot(mat[i, 0:2], para) - mat[i, 2])**2

        __cost = __cost / (2 * __size)

        return __cost
Example #17
    def transform(self, positions, R_i=None, t_i=None, s_i=None, flip=False):
        """
        Return subclusters with (randomly) rotated translated and scaled
        positions. If R_i, s_i or t_i is given then that part of transformation
        is not random.

        """

        for sub in positions:
            t = t_i or rand(2) * 10
            s = s_i or rand() * 2
            if R_i is None:
                th = 2 * pi * rand()
                # ccw
                R = array([[cos(th), -sin(th)], [sin(th), cos(th)]])
            else:
                R = R_i
            if flip:
                #TODO: make R with flip
                pass

            for node, pos in sub.items():
                sub[node] = concatenate((dot(dot(s, R), pos[:2]) + t, [nan]))
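
A minimal illustration of the rotate-scale-translate step above applied to a single point (standalone sketch using plain numpy; the concrete angle, scale and translation are made up):

import numpy as np

pos = np.array([1.0, 0.0, np.nan])     # x, y, and a placeholder third component
th = np.pi / 2                         # 90-degree counter-clockwise rotation
R = np.array([[np.cos(th), -np.sin(th)], [np.sin(th), np.cos(th)]])
s, t = 2.0, np.array([10.0, 20.0])
new_pos = np.concatenate((np.dot(np.dot(s, R), pos[:2]) + t, [np.nan]))
print(new_pos)                         # approximately [10., 22., nan]
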
Example #18
    def transform(self, positions, R_i=None, t_i=None, s_i=None, flip=False):
        """
        Return subclusters with (randomly) rotated translated and scaled
        positions. If R_i, s_i or t_i is given then that part of transformation
        is not random.

        """

        for sub in positions:
            t = t_i or rand(2)*10
            s = s_i or rand()*2
            if R_i is None:
                th = 2*pi*rand()
                # ccw
                R = array([[cos(th), -sin(th)], [sin(th), cos(th)]])
            else:
                R = R_i
            if flip:
                #TODO: make R with flip
                pass

            for node, pos in sub.items():
                sub[node] = concatenate((dot(dot(s, R), pos[:2])+t, [nan]))
Example #19
 def __computeCost(self,mat,para=zeros((1,2))): 
     # if para is not overridden with custom initial values, use zeros
     # cost computation code goes here
     
     __cost=0.0;
     __size= mat[:,1].size;
     
      for i in range(__size):
         __cost=__cost + (dot(mat[i,0:2],para)- mat[i,2])**2;
         
         
     __cost= __cost/(2*__size);
     
     return __cost;
Example #20
def matrix_power(M, n, mod_val):
    # Implementation shadows numpy's matrix_power, but with modulo included
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input  must be a square array")
    #if not issubdtype(type(n), int):
    #    raise TypeError("exponent must be an integer")

    from numpy.linalg import inv

    if n==0:
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    elif n<0:
        M = inv(M)
        n *= -1

    result = M % mod_val
    if n <= 3:
        for _ in range(n-1):
            result = dot(result, M) % mod_val
        return result

    # binary decomposition to reduce the number of matrix
    # multiplications for n > 3
    beta = binary_repr(n)
    Z, q, t = M, 0, len(beta)
    while beta[t-q-1] == '0':
        Z = dot(Z, Z) % mod_val
        q += 1
    result = Z
    for k in range(q+1, t):
        Z = dot(Z, Z) % mod_val
        if beta[t-k-1] == '1':
            result = dot(result, Z) % mod_val
    return result % mod_val
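
One possible use of the modular matrix_power above: large Fibonacci numbers through the [[1, 1], [1, 0]] companion matrix (this assumes the numpy names the function relies on, such as asanyarray, dot, identity and binary_repr, are imported as in its source module):

from numpy import array

F = array([[1, 1], [1, 0]], dtype=object)     # object dtype keeps exact integers
print(matrix_power(F, 10, 10**9 + 7)[0, 1])   # 55, the 10th Fibonacci number
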
Example #21
    def poweriteration(S=None, k=2, epsilon=0.01):
        """
        S: covariance matrix
        k: dimension of the decomposition space
        epsilon: a constant used as the convergence tolerance
        Returns U, a list containing the first k eigenvectors.
        """
        if S.shape[0] == S.shape[1]:
            m = S.shape[0]
        else:
            print("Invalid covariance matrix S.")
            return -1

        print("number of pivots:", m)
        U = []

        for i in range(k):
            num = 0
            i = i + 1
            ui_ = np.random.rand(m) # size of ui is m
            # ui_ = np.ones(m)
            ui_ /= np.sqrt(dot(ui_, ui_)) # ui_=ui_/|ui_| normalized

            ui = ui_
            for j in range(i - 1):
                ui = ui - dot(ui, U[j])*U[j]
            ui_ = np.matmul(S, ui)
            ui_ /= np.sqrt(dot(ui_, ui_))  # ui=ui/|ui| normalization

            while dot(ui_, ui) < 1 - epsilon:
            # while iteration > 0:
            #     iteration = iteration - 1
                num = num + 1
                if num % 10 == 0:
                     print("loop")
                ui = ui_

                for j in range(i - 1):
                    ui = ui - dot(ui, U[j])*U[j]

                ui_ = np.matmul(S, ui)
                ui_ /= np.sqrt(dot(ui_, ui_))  # ui=ui/|ui| normalization


            U.append(ui_)  # store eigenvector into list U
            print("number of iteration:", num)

        return U
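
A compact standalone check of the power-iteration idea against numpy.linalg.eigh (illustrative; it only recovers the dominant eigenvector and skips the deflation step used above):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(5, 5))
S = A @ A.T                         # symmetric test matrix

u = rng.normal(size=5)
u /= np.sqrt(np.dot(u, u))
for _ in range(500):
    u = S @ u
    u /= np.sqrt(np.dot(u, u))      # re-normalize each iteration

w, V = np.linalg.eigh(S)
print(abs(np.dot(u, V[:, -1])))     # ~1.0: matches the dominant eigenvector up to sign
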
Example #22
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    """
    Estimate a covariance matrix, given data.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
    of :math:`x_i`.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        form as that of `m`.
    rowvar : int, optional
        If `rowvar` is non-zero (default), then each row represents a
        variable, with observations in the columns. Otherwise, the relationship
        is transposed: each column represents a variable, while the rows
        contain observations.
    bias : int, optional
        Default normalization is by ``(N - 1)``, where ``N`` is the number of
        observations given (unbiased estimate). If `bias` is 1, then
        normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof`` in numpy versions >= 1.5.
    ddof : int, optional
        .. versionadded:: 1.5
        If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
        the number of observations; this overrides the value implied by
        ``bias``. The default value is ``None``.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables. The data type of `out` is np.complex128 if either `m` or `y` is complex, otherwise np.float64.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Examples
    --------
    Consider two variables, :math:`x_0` and :math:`x_1`, which
    correlate perfectly, but in opposite directions:

    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> x
    array([[0, 1, 2],
           [2, 1, 0]])

    Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
    matrix shows this clearly:

    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])

    Note that element :math:`C_{0,1}`, which shows the correlation between
    :math:`x_0` and :math:`x_1`, is negative.

    >>> x = np.array([[0, 2], [1, 1], [2, 0]], dtype=np.complex128).T
    >>> x
    array([[ 0.+0.j,  1.+0.j,  2.+0.j],
           [ 2.+0.j,  1.+0.j,  0.+0.j]])
    >>> npcov.cov(x)
    array([[ 1.+0.j, -1.+0.j],
           [-1.+0.j,  1.+0.j]])

    Further, note how `x` and `y` are combined:

    >>> x = [-2.1, -1,  4.3]
    >>> y = [3,  1.1,  0.12]
    >>> X = np.vstack((x,y))
    >>> print np.cov(X)
    [[ 11.71        -4.286     ]
     [ -4.286        2.14413333]]
    >>> print np.cov(x, y)
    [[ 11.71        -4.286     ]
     [ -4.286        2.14413333]]
    >>> print np.cov(x)
    11.71

    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError(
            "ddof must be integer")

    # Handles complex arrays too
    m = np.asarray(m)
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = np.asarray(y)
        dtype = np.result_type(m, y, np.float64)
    X = array(m, ndmin=2, dtype=dtype)

    if X.shape[0] == 1:
        rowvar = 1
    if rowvar:
        N = X.shape[1]
        axis = 0
    else:
        N = X.shape[0]
        axis = 1

    # check ddof
    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0
    fact = float(N - ddof)
    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0

    if y is not None:
        y = array(y, copy=False, ndmin=2, dtype=dtype)
        X = concatenate((X, y), axis)

    X -= X.mean(axis=1-axis, keepdims=True)
    if not rowvar:
        return (dot(X.T, X.conj()) / fact).squeeze()
    else:
        return (dot(X, X.T.conj()) / fact).squeeze()
Example #23
def matrix_power(M, n):
    """
    Raise a square matrix to the (integer) power `n`.

    For positive integers `n`, the power is computed by repeated matrix
    squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as M is returned. If ``n < 0``, the inverse
    is computed and then raised to the ``abs(n)``.

    Parameters
    ----------
    M : ndarray or matrix object
        Matrix to be "powered."  Must be square, i.e. ``M.shape == (m, m)``,
        with `m` a positive integer.
    n : int
        The exponent can be any integer or long integer, positive,
        negative, or zero.

    Returns
    -------
    M**n : ndarray or matrix object
        The return value is the same shape and type as `M`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `M`. If the exponent is
        negative the elements are floating-point.

    Raises
    ------
    LinAlgError
        If the matrix is not numerically invertible.

    See Also
    --------
    matrix
        Provides an equivalent function as the exponentiation operator
        (``**``, not ``^``).

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
    >>> LA.matrix_power(i, 3) # should = -i
    array([[ 0, -1],
           [ 1,  0]])
    >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
    matrix([[ 0, -1],
            [ 1,  0]])
    >>> LA.matrix_power(i, 0)
    array([[1, 0],
           [0, 1]])
    >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
    array([[ 0.,  1.],
           [-1.,  0.]])

    Somewhat more sophisticated example

    >>> q = np.zeros((4, 4))
    >>> q[0:2, 0:2] = -i
    >>> q[2:4, 2:4] = i
    >>> q # one of the three quaternion units not equal to 1
    array([[ 0., -1.,  0.,  0.],
           [ 1.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  1.],
           [ 0.,  0., -1.,  0.]])
    >>> LA.matrix_power(q, 2) # = -np.eye(4)
    array([[-1.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  0.],
           [ 0.,  0., -1.,  0.],
           [ 0.,  0.,  0., -1.]])

    """
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    if not issubdtype(type(n), int):
        raise TypeError("exponent must be an integer")

    from numpy.linalg import inv

    if n==0:
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    elif n<0:
        M = inv(M)
        n *= -1

    result = M
    if n <= 3:
        for _ in range(n-1):
            result=N.dot(result, M)
        return result

    # binary decomposition to reduce the number of Matrix
    # multiplications for n > 3.
    beta = binary_repr(n)
    Z, q, t = M, 0, len(beta)
    while beta[t-q-1] == '0':
        Z = N.dot(Z, Z)
        q += 1
    result = Z
    for k in range(q+1, t):
        Z = N.dot(Z, Z)
        if beta[t-k-1] == '1':
            result = N.dot(result, Z)
    return result
Example #24
 def __rmul__(self, other):
     return N.dot(other, self)
Example #25
def main():
    print "dot(3, 4):", dot(3, 4)
Example #26
def matrix_power(M,n):
    """
    Raise a square matrix to the (integer) power n.

    For positive integers n, the power is computed by repeated matrix
    squarings and matrix multiplications. If n=0, the identity matrix
    of the same type as M is returned. If n<0, the inverse is computed
    and raised to the exponent.

    Parameters
    ----------
    M : array_like
        Must be a square array (that is, of dimension two and with
        equal sizes).
    n : integer
        The exponent can be any integer or long integer, positive,
        negative, or zero.

    Returns
    -------
    M to the power n
        The return value is an array the same shape and size as M;
        if the exponent was positive or zero then the type of the
        elements is the same as those of M. If the exponent was negative
        the elements are floating-point.

    Raises
    ------
    LinAlgException
        If the matrix is not numerically invertible, an exception is raised.

    See Also
    --------
    The matrix() class provides an equivalent function as the exponentiation
    operator.

    Examples
    --------
    >>> np.linalg.matrix_power(np.array([[0,1],[-1,0]]),10)
    array([[-1,  0],
           [ 0, -1]])

    """
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    if not issubdtype(type(n),int):
        raise TypeError("exponent must be an integer")

    from numpy.linalg import inv

    if n==0:
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    elif n<0:
        M = inv(M)
        n *= -1

    result = M
    if n <= 3:
        for _ in range(n-1):
            result=N.dot(result,M)
        return result

    # binary decomposition to reduce the number of Matrix
    # multiplications for n > 3.
    beta = binary_repr(n)
    Z,q,t = M,0,len(beta)
    while beta[t-q-1] == '0':
        Z = N.dot(Z,Z)
        q += 1
    result = Z
    for k in range(q+1,t):
        Z = N.dot(Z,Z)
        if beta[t-k-1] == '1':
            result = N.dot(result,Z)
    return result
Example #27
 def __rmul__(self, other):
     # extract scalars from singleton matrices
     if self.shape == (1,1):
         return N.dot(other, self.flat[0])
     else:
         return N.dot(other, self)
Example #28
    def Solve(self):
        '''
        This method builds System Matrix and gets Solution
        '''
        if self.SimulationContext.Id != self.NetworkMesh.Id:
            raise self.SimulationContext.XMLIdError()
        try:
            self.TimeStep = self.SimulationContext.Context['timestep']
            self.SquareTimeStep = self.TimeStep*self.TimeStep
        except KeyError:
            print "Error, Please set timestep in Simulation Context XML File"
            raise
        try:
            self.Period = self.SimulationContext.Context['period']
            self.TimeStepFreq = int(self.Period/self.TimeStep)
        except KeyError:
            print "Error, Please set period in Simulation Context XML File"
            raise
        try:
            self.Cycles = self.SimulationContext.Context['cycles']
            self.NumberOfIncrements = (self.Cycles*self.TimeStepFreq)
        except KeyError:
            print "Error, Please set cycles number in Simulation Context XML File"
            raise

        history = []
        assembler = Assembler()
        assembler.SetNetworkMesh(self.NetworkMesh)
        assembler.SetBoundaryConditions(self.BoundaryConditions)
        info = {'dofmap':assembler.DofMap,'solution':None,'incrementNumber':self.IncrementNumber,'history':history}
        self.Evaluator.SetInfo(info)

        self.PrescribedPressures = assembler.AssembleBoundaryConditions(self.SimulationContext)

        self.LinearZeroOrderGlobalMatrix, self.LinearFirstOrderGlobalMatrix, self.LinearSecondOrderGlobalMatrix = \
        assembler.AssembleInit(self.SimulationContext, self.Evaluator)

        self.ZeroOrderGlobalMatrix = assembler.ZeroOrderGlobalMatrix
        self.FirstOrderGlobalMatrix = assembler.FirstOrderGlobalMatrix
        self.SecondOrderGlobalMatrix = assembler.SecondOrderGlobalMatrix

        NumberOfGlobalDofs = assembler.GetNumberOfGlobalDofs()          # number of dofs
        self.UnknownPressures = arange(0,NumberOfGlobalDofs).reshape(NumberOfGlobalDofs,1)          # unknown pressures
        self.UnknownPressures = delete(self.UnknownPressures, s_[self.PrescribedPressures[:,0]], axis=0)
        PressuresMatrix = zeros((NumberOfGlobalDofs, self.NumberOfIncrements))
        self.p = zeros((NumberOfGlobalDofs,1))
        self.pt = zeros((NumberOfGlobalDofs,1))
        self.ptt = zeros((NumberOfGlobalDofs,1))
        self.dp = zeros((NumberOfGlobalDofs,1))
        self.ddp = zeros((NumberOfGlobalDofs,1))
        self.dpt = zeros((NumberOfGlobalDofs,1))
        self.ddpt = zeros((NumberOfGlobalDofs,1))
        self.fe = zeros((NumberOfGlobalDofs,1))
        self.fet = zeros((NumberOfGlobalDofs,1))
        self.dfe = zeros((NumberOfGlobalDofs,1))
        self.dfet = zeros((NumberOfGlobalDofs,1))
        self.fi = zeros((NumberOfGlobalDofs,1))
        self.fit = zeros((NumberOfGlobalDofs,1))
        self.sumv = zeros((NumberOfGlobalDofs,1))
        sumvbk = zeros((NumberOfGlobalDofs,1))
        nonLinear = False
        for el in self.NetworkMesh.Elements:
            if el.IsNonLinear() == True:
                nonLinear = True
                break

        while self.IncrementNumber<=self.NumberOfIncrements:
            icc = (self.IncrementNumber%self.TimeStepFreq)
            if icc == 0:
                icc = self.TimeStepFreq

            #for flow in self.BoundaryConditions.elementFlow:
            for el in self.BoundaryConditions.elementFlow:
              if self.steady == True:
                  self.Flow = assembler.BoundaryConditions.GetSteadyFlow(el, self.TimeStep,icc*self.TimeStep)
              else:
                  self.Flow = assembler.BoundaryConditions.GetTimeFlow(el, icc*self.TimeStep)
              self.fe[assembler.FlowDof[el.Id]]= self.Flow

            CoeffRelax = 0.9
            nltol = self.nltol
            self.pi = None
            pI = None
            sumvbk[:,:] = self.sumv[:,:]
            counter = 0
            while True:
                #Build the algebraic equation system for the increment
                SystemMatrix = (2.0/self.TimeStep)*self.SecondOrderGlobalMatrix + self.FirstOrderGlobalMatrix + (self.TimeStep/2.0)*self.ZeroOrderGlobalMatrix    #system matrix
                RightVector = self.fe + (2.0/self.TimeStep)*dot(self.SecondOrderGlobalMatrix,(self.pt)) + dot(self.SecondOrderGlobalMatrix,(self.dpt)) - dot(self.ZeroOrderGlobalMatrix,(self.sumv))-(self.TimeStep/2.0)*dot(self.ZeroOrderGlobalMatrix,(self.pt)) # right hand side vector
                #The reduced (partitioned) system of equations is generated.
                RightVector[:,:] = RightVector[:,:] - dot(SystemMatrix[:,self.PrescribedPressures[:,0]],self.PrescribedPressures[:,1:])
                SystemMatrix = SystemMatrix[:,s_[self.UnknownPressures[:,0]]]
                if SystemMatrix.shape[0]> 0.0:
                    SystemMatrix = SystemMatrix[s_[self.UnknownPressures[:,0]],:]
                RightVector = RightVector[s_[self.UnknownPressures[:,0]],:]
                #Unknown nodal point values are solved from this system.
                #  Prescribed nodal values are inserted in the solution vector.
                Solution = solve(SystemMatrix,RightVector) # solutions, unknown pressures
                self.p[self.UnknownPressures,0] = Solution[:,:]
                self.p[self.PrescribedPressures[:,0],0] = self.PrescribedPressures[:,1]
                #Calculating derivatives.
                #Calculating internal nodal flow values.
                self.dp = dot((2.0/self.TimeStep),(self.p-self.pt)) - self.dpt
                self.ddp = dot((4.0/self.SquareTimeStep),(self.p-self.pt)) - dot((4.0/self.TimeStep),self.dpt) -self.ddpt
                self.sumv = sumvbk + dot((self.TimeStep/2.0),(self.pt+self.p))
                self.fi = dot(self.SecondOrderGlobalMatrix,(self.dp)) + dot(self.FirstOrderGlobalMatrix,(self.p)) + dot(self.ZeroOrderGlobalMatrix,(self.sumv))
                if not nonLinear :
                    break

                if self.pi is None:
                    self.pi = zeros((NumberOfGlobalDofs,1))
                    self.pi[:,:] = self.pt[:,:]
                pI = CoeffRelax * self.p + self.pi * (1.0-CoeffRelax)
                self.p[:,:] = pI[:,:]
                den = norm(self.pi,Inf)
                if den < 1e-12:
                    den = 1.0
                nlerr = norm(self.p-self.pi,Inf) / den

                info = {'dofmap':assembler.DofMap,'solution':[self.p, self.pt, self.ptt],'incrementNumber':self.IncrementNumber,'history':history}
                self.Evaluator.SetInfo(info)

                assembler.Assemble(self.SimulationContext, self.Evaluator, self.LinearZeroOrderGlobalMatrix, self.LinearFirstOrderGlobalMatrix, self.LinearSecondOrderGlobalMatrix)
                self.ZeroOrderGlobalMatrix = assembler.ZeroOrderGlobalMatrix
                self.FirstOrderGlobalMatrix = assembler.FirstOrderGlobalMatrix
                self.SecondOrderGlobalMatrix = assembler.SecondOrderGlobalMatrix

                #Dynamic nonlinear relaxing coefficient
                if counter == 100:
                    print "relaxing..."
                    print nlerr, nltol, CoeffRelax
                    counter = 0
                    self.pi[:,:] = None
                    self.sumv[:,:] = sumvbk[:,:]
                    CoeffRelax *= 0.6
                    nltol *= 0.95
                if nlerr < nltol:
                    nltol = self.nltol
                    counter = 0
                    break
                counter+=1
                self.pi[:,:] = self.p[:,:]

            self.ptt[:,:] = self.pt[:,:]
            self.pt[:,:] = self.p[:,:]
            self.dpt[:,:] = self.dp[:,:]
            self.ddpt[:,:] = self.ddp[:,:]
            self.fet[:,:] = self.fe[:,:]
            self.fit[:,:] = self.fi[:,:]
            PressuresMatrix[:,(self.IncrementNumber-1)] = self.p[:,0]
            history.insert(0,self.IncrementNumber)
            history = history[:3]

            if self.steady == True:
                self.MinimumIncrementNumber = 0.01* self.NumberOfIncrements
                if norm(self.fi-self.fe,Inf)<self.convergence and self.IncrementNumber > self.MinimumIncrementNumber:
                    self.IncrementNumber = self.NumberOfIncrements
                else:
                    pass

            if self.IncrementNumber==ceil(0.05*self.NumberOfIncrements):
                print "->5%"
            if self.IncrementNumber==ceil(0.25*self.NumberOfIncrements):
                print "->25%"
            if self.IncrementNumber==ceil(0.5*self.NumberOfIncrements):
                print "->50%"
            if self.IncrementNumber==ceil(0.70*self.NumberOfIncrements):
                print "->70%"
            if self.IncrementNumber==ceil(0.90*self.NumberOfIncrements):
                print "->90%"
            if self.IncrementNumber==ceil(0.99*self.NumberOfIncrements):
                print "->99%"

            self.IncrementNumber = self.IncrementNumber+1
            self.EndIncrementTime = self.EndIncrementTime + self.TimeStep    # increment
        info = {'dofmap':assembler.DofMap,'solution':[self.p, self.pt, self.ptt],'incrementNumber':self.IncrementNumber,'history':history,'allSolution':PressuresMatrix}
        self.Evaluator.SetInfo(info)
        self.Solutions = PressuresMatrix
        return PressuresMatrix
Example #29
def main():
    print "dot(3, 4):", dot(3, 4)
Example #30
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    """
    Estimate a covariance matrix, given data.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
    of :math:`x_i`.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        form as that of `m`.
    rowvar : int, optional
        If `rowvar` is non-zero (default), then each row represents a
        variable, with observations in the columns. Otherwise, the relationship
        is transposed: each column represents a variable, while the rows
        contain observations.
    bias : int, optional
        Default normalization is by ``(N - 1)``, where ``N`` is the number of
        observations given (unbiased estimate). If `bias` is 1, then
        normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof`` in numpy versions >= 1.5.
    ddof : int, optional
        .. versionadded:: 1.5
        If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
        the number of observations; this overrides the value implied by
        ``bias``. The default value is ``None``.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables. The data type of `out` is np.complex128 if either `m` or `y` is complex, otherwise np.float64.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Examples
    --------
    Consider two variables, :math:`x_0` and :math:`x_1`, which
    correlate perfectly, but in opposite directions:

    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> x
    array([[0, 1, 2],
           [2, 1, 0]])

    Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
    matrix shows this clearly:

    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])

    Note that element :math:`C_{0,1}`, which shows the correlation between
    :math:`x_0` and :math:`x_1`, is negative.

    >>> x = np.array([[0, 2], [1, 1], [2, 0]], dtype=np.complex128).T
    >>> x
    array([[ 0.+0.j,  1.+0.j,  2.+0.j],
           [ 2.+0.j,  1.+0.j,  0.+0.j]])
    >>> npcov.cov(x)
    array([[ 1.+0.j, -1.+0.j],
           [-1.+0.j,  1.+0.j]])

    Further, note how `x` and `y` are combined:

    >>> x = [-2.1, -1,  4.3]
    >>> y = [3,  1.1,  0.12]
    >>> X = np.vstack((x,y))
    >>> print np.cov(X)
    [[ 11.71        -4.286     ]
     [ -4.286        2.14413333]]
    >>> print np.cov(x, y)
    [[ 11.71        -4.286     ]
     [ -4.286        2.14413333]]
    >>> print np.cov(x)
    11.71

    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be integer")

    # Handles complex arrays too
    m = np.asarray(m)
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = np.asarray(y)
        dtype = np.result_type(m, y, np.float64)
    X = array(m, ndmin=2, dtype=dtype)

    if X.shape[0] == 1:
        rowvar = 1
    if rowvar:
        N = X.shape[1]
        axis = 0
    else:
        N = X.shape[0]
        axis = 1

    # check ddof
    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0
    fact = float(N - ddof)
    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0

    if y is not None:
        y = array(y, copy=False, ndmin=2, dtype=dtype)
        X = concatenate((X, y), axis)

    X -= X.mean(axis=1 - axis, keepdims=True)
    if not rowvar:
        return (dot(X.T, X.conj()) / fact).squeeze()
    else:
        return (dot(X, X.T.conj()) / fact).squeeze()
Example #31
def matrix_power(M, n):
    """
    Raise a square matrix to the (integer) power `n`.

    For positive integers `n`, the power is computed by repeated matrix
    squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as M is returned. If ``n < 0``, the inverse
    is computed and then raised to the ``abs(n)``.

    Parameters
    ----------
    M : ndarray or matrix object
        Matrix to be "powered."  Must be square, i.e. ``M.shape == (m, m)``,
        with `m` a positive integer.
    n : int
        The exponent can be any integer or long integer, positive,
        negative, or zero.

    Returns
    -------
    M**n : ndarray or matrix object
        The return value is the same shape and type as `M`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `M`. If the exponent is
        negative the elements are floating-point.

    Raises
    ------
    LinAlgError
        If the matrix is not numerically invertible.

    See Also
    --------
    matrix
        Provides an equivalent function as the exponentiation operator
        (``**``, not ``^``).

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
    >>> LA.matrix_power(i, 3) # should = -i
    array([[ 0, -1],
           [ 1,  0]])
    >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
    matrix([[ 0, -1],
            [ 1,  0]])
    >>> LA.matrix_power(i, 0)
    array([[1, 0],
           [0, 1]])
    >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
    array([[ 0.,  1.],
           [-1.,  0.]])

    Somewhat more sophisticated example

    >>> q = np.zeros((4, 4))
    >>> q[0:2, 0:2] = -i
    >>> q[2:4, 2:4] = i
    >>> q # one of the three quaternion units not equal to 1
    array([[ 0., -1.,  0.,  0.],
           [ 1.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  1.],
           [ 0.,  0., -1.,  0.]])
    >>> LA.matrix_power(q, 2) # = -np.eye(4)
    array([[-1.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  0.],
           [ 0.,  0., -1.,  0.],
           [ 0.,  0.,  0., -1.]])

    """
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    if not issubdtype(type(n), int):
        raise TypeError("exponent must be an integer")

    from numpy.linalg import inv

    if n == 0:
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    elif n < 0:
        M = inv(M)
        n *= -1

    result = M
    if n <= 3:
        for _ in range(n - 1):
            result = N.dot(result, M)
        return result

    # binary decomposition to reduce the number of Matrix
    # multiplications for n > 3.
    beta = binary_repr(n)
    Z, q, t = M, 0, len(beta)
    while beta[t - q - 1] == '0':
        Z = N.dot(Z, Z)
        q += 1
    result = Z
    for k in range(q + 1, t):
        Z = N.dot(Z, Z)
        if beta[t - k - 1] == '1':
            result = N.dot(result, Z)
    return result
Example #32
 def _followxSingleDirection(  self, 
                               x, 
                               direction = Direction.FORWARD,
                               forward_curve = None,
                               last_eigenvector = None, 
                               weights = 1.):
   '''Generates a partial lpc curve dictionary from the start point, x.
   Arguments
   ---------
   x : 1-dim, length m, numpy.array of floats, start point for the algorithm when m is dimension of feature space
   
   direction :  bool, proceeds in Direction.FORWARD or Direction.BACKWARD from this point (just sets sign for first eigenvalue) 
   
   forward_curve : dictionary as returned by this function, is used to detect crossing of the curve under construction with a
       previously constructed curve
       
   last_eigenvector : 1-dim, length m, numpy.array of floats, a unit vector that defines the initial direction, relative to
       which the first eigenvector is biased and initial cos_neu_neu is calculated  
       
   weights : 1-dim, length n numpy.array of observation weights (can also be used to exclude
       individual observations from the computation by setting their weight to zero.),
       where n is the number of feature points 
   '''
   x0 = copy(x)
   N = self.Xi.shape[0]
   d = self.Xi.shape[1]
   it = self._lpcParameters['it']
   h = array(self._lpcParameters['h'])
   t0 = self._lpcParameters['t0']
   rho0 = self._lpcParameters['rho0']
   
   save_xd = empty((it,d))
   eigen_vecd = empty((it,d))
   c0 = ones(it)
   cos_alt_neu = ones(it)
   cos_neu_neu = ones(it)    
   lamb = empty(it) #NOTE this is named 'lambda' in the original R code
   rho = zeros(it)
   high_rho_points = empty((0,d))    
   count_points = 0
   
   for i in range(it):
     kernel_weights = self._kernd(self.Xi, x0, c0[i]*h) * weights
     mu_x = average(self.Xi, axis = 0, weights = kernel_weights)
     sum_weights = sum(kernel_weights)
     mean_sub = self.Xi - mu_x 
     cov_x = dot( dot(transpose(mean_sub), numpy.diag(kernel_weights)), mean_sub) / sum_weights 
     #assert (abs(cov_x.transpose() - cov_x)/abs(cov_x.transpose() + cov_x) < 1e-6).all(), 'Covariance matrix not symmetric, \n cov_x = {0}, mean_sub = {1}'.format(cov_x, mean_sub)
     save_xd[i] = mu_x #save first point of the branch
     count_points += 1
     
     #calculate path length
     if i==0:
       lamb[0] = 0
     else:
       lamb[i] = lamb[i-1] + sqrt(sum((mu_x - save_xd[i-1])**2))
     
     #calculate eigenvalues/vectors
     #(sorted_eigen_cov is a list of tuples containing eigenvalue and associated eigenvector, sorted descending by eigenvalue)
     eigen_cov = eigh(cov_x)
     sorted_eigen_cov = zip(eigen_cov[0],map(ravel,vsplit(eigen_cov[1].transpose(),len(eigen_cov[1]))))
     sorted_eigen_cov.sort(key = lambda elt: elt[0], reverse = True)   
     eigen_norm = sqrt(sum(sorted_eigen_cov[0][1]**2))
     eigen_vecd[i] = direction * sorted_eigen_cov[0][1] / eigen_norm  #Unit eigenvector corresponding to largest eigenvalue
     
     #rho parameters
     rho[i] = sorted_eigen_cov[1][0] / sorted_eigen_cov[0][0] #Ratio of two largest eigenvalues
     if i != 0 and rho[i] > rho0 and rho[i-1] <= rho0:
       high_rho_points = vstack((high_rho_points, x0))
     
     #angle between successive eigenvectors
     if i==0 and last_eigenvector is not None:
       cos_alt_neu[i] = direction * dot(last_eigenvector, eigen_vecd[i])
     if i > 0:
       cos_alt_neu[i] = dot(eigen_vecd[i], eigen_vecd[i-1])
     
     #signum flipping
     if cos_alt_neu[i] < 0:
       eigen_vecd[i] = -eigen_vecd[i]
       cos_neu_neu[i] = -cos_alt_neu[i]
     else:
       cos_neu_neu[i] = cos_alt_neu[i]
    
     #angle penalization
     pen = self._lpcParameters['pen']
     if pen > 0:
       if i == 0 and last_eigenvector is not None:
         a = abs(cos_alt_neu[i])**pen
         eigen_vecd[i] = a * eigen_vecd[i] + (1-a) * last_eigenvector
       if i > 0:
         a = abs(cos_alt_neu[i])**pen
         eigen_vecd[i] = a * eigen_vecd[i] + (1-a) * eigen_vecd[i-1]
             
     #check curve termination criteria
     if i not in (0, it-1):
       #crossing
       cross = self._lpcParameters['cross']
       if forward_curve is None:
         full_curve_points = save_xd[0:i+1]
       else:
         full_curve_points = vstack((forward_curve['save_xd'],save_xd[0:i+1])) #inefficient, initialize then append? 
       if not cross:
         prox = where(ravel(cdist(full_curve_points,[mu_x])) <= mean(h))[0]
         if len(prox) != max(prox) - min(prox) + 1:
           break
         
       #convergence
       convergence_at = self._lpcParameters['convergence_at']
       conv_ratio = abs(lamb[i] - lamb[i-1]) / (2 * (lamb[i] + lamb[i-1]))
       if conv_ratio  < convergence_at:
         break
       
       #boundary
       boundary = self._lpcParameters['boundary']
       if conv_ratio < boundary:
         c0[i+1] = 0.995 * c0[i]
       else:
         c0[i+1] = min(1.01*c0[i], 1)
     
     #step along in direction eigen_vecd[i]
     x0 = mu_x + t0 * eigen_vecd[i]
   
   #trim output in the case where convergence occurs before 'it' iterations    
   curve = { 'save_xd': save_xd[0:count_points],
             'eigen_vecd': eigen_vecd[0:count_points],
             'cos_neu_neu': cos_neu_neu[0:count_points],
             'rho': rho[0:count_points],
             'high_rho_points': high_rho_points,
             'lamb': lamb[0:count_points],
             'c0': c0[0:count_points]
           }
   return curve  
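
A minimal sketch of the kernel-weighted local mean and covariance computed inside the loop above (standalone; a Gaussian kernel stands in for self._kernd, which is an assumption, and the data is random):

import numpy as np

rng = np.random.default_rng(0)
Xi = rng.normal(size=(100, 2))
x0 = np.zeros(2)
h = 0.5

# Gaussian kernel weights around x0 (stand-in for self._kernd).
kernel_weights = np.exp(-0.5 * np.sum(((Xi - x0) / h) ** 2, axis=1))
mu_x = np.average(Xi, axis=0, weights=kernel_weights)
mean_sub = Xi - mu_x
cov_x = (mean_sub.T @ np.diag(kernel_weights) @ mean_sub) / kernel_weights.sum()
print(mu_x, np.linalg.eigh(cov_x)[0])   # local mean and ascending eigenvalues
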
Example #33
 def __rmul__(self, other):
     return N.dot(other, self)
Example #34
    def Solve(self):
        '''
        This method builds System Matrix and gets Solution
        '''
        if self.SimulationContext.Id != self.NetworkMesh.Id:
            raise self.SimulationContext.XMLIdError()
        try:
            self.TimeStep = self.SimulationContext.Context['timestep']
            self.SquareTimeStep = self.TimeStep * self.TimeStep
        except KeyError:
            print "Error, Please set timestep in Simulation Context XML File"
            raise
        try:
            self.Period = self.SimulationContext.Context['period']
            self.TimeStepFreq = int(self.Period / self.TimeStep)
        except KeyError:
            print "Error, Please set period in Simulation Context XML File"
            raise
        try:
            self.Cycles = self.SimulationContext.Context['cycles']
            self.NumberOfIncrements = (self.Cycles * self.TimeStepFreq)
        except KeyError:
            print "Error, Please set cycles number in Simulation Context XML File"
            raise

        history = []
        assembler = Assembler()
        assembler.SetNetworkMesh(self.NetworkMesh)
        assembler.SetBoundaryConditions(self.BoundaryConditions)
        info = {
            'dofmap': assembler.DofMap,
            'solution': None,
            'incrementNumber': self.IncrementNumber,
            'history': history
        }
        self.Evaluator.SetInfo(info)

        self.PrescribedPressures = assembler.AssembleBoundaryConditions(
            self.SimulationContext)

        self.LinearZeroOrderGlobalMatrix, self.LinearFirstOrderGlobalMatrix, self.LinearSecondOrderGlobalMatrix = \
        assembler.AssembleInit(self.SimulationContext, self.Evaluator)

        self.ZeroOrderGlobalMatrix = assembler.ZeroOrderGlobalMatrix
        self.FirstOrderGlobalMatrix = assembler.FirstOrderGlobalMatrix
        self.SecondOrderGlobalMatrix = assembler.SecondOrderGlobalMatrix

        NumberOfGlobalDofs = assembler.GetNumberOfGlobalDofs(
        )  # number of dofs
        self.UnknownPressures = arange(0, NumberOfGlobalDofs).reshape(
            NumberOfGlobalDofs, 1)  # unknown pressures
        self.UnknownPressures = delete(self.UnknownPressures,
                                       s_[self.PrescribedPressures[:, 0]],
                                       axis=0)
        PressuresMatrix = zeros((NumberOfGlobalDofs, self.NumberOfIncrements))
        self.p = zeros((NumberOfGlobalDofs, 1))
        self.pt = zeros((NumberOfGlobalDofs, 1))
        self.ptt = zeros((NumberOfGlobalDofs, 1))
        self.dp = zeros((NumberOfGlobalDofs, 1))
        self.ddp = zeros((NumberOfGlobalDofs, 1))
        self.dpt = zeros((NumberOfGlobalDofs, 1))
        self.ddpt = zeros((NumberOfGlobalDofs, 1))
        self.fe = zeros((NumberOfGlobalDofs, 1))
        self.fet = zeros((NumberOfGlobalDofs, 1))
        self.dfe = zeros((NumberOfGlobalDofs, 1))
        self.dfet = zeros((NumberOfGlobalDofs, 1))
        self.fi = zeros((NumberOfGlobalDofs, 1))
        self.fit = zeros((NumberOfGlobalDofs, 1))
        self.sumv = zeros((NumberOfGlobalDofs, 1))
        sumvbk = zeros((NumberOfGlobalDofs, 1))
        nonLinear = False
        for el in self.NetworkMesh.Elements:
            if el.IsNonLinear() == True:
                nonLinear = True
                break

        while self.IncrementNumber <= self.NumberOfIncrements:
            icc = (self.IncrementNumber % self.TimeStepFreq)
            if icc == 0:
                icc = self.TimeStepFreq

            #for flow in self.BoundaryConditions.elementFlow:
            for el in self.BoundaryConditions.elementFlow:
                if self.steady == True:
                    self.Flow = assembler.BoundaryConditions.GetSteadyFlow(
                        el, self.TimeStep, icc * self.TimeStep)
                else:
                    self.Flow = assembler.BoundaryConditions.GetTimeFlow(
                        el, icc * self.TimeStep)
                self.fe[assembler.FlowDof[el.Id]] = self.Flow

            CoeffRelax = 0.9
            nltol = self.nltol
            self.pi = None
            pI = None
            sumvbk[:, :] = self.sumv[:, :]
            counter = 0
            while True:
                #Build the algebraic equation system for the increment
                SystemMatrix = (
                    2.0 / self.TimeStep
                ) * self.SecondOrderGlobalMatrix + self.FirstOrderGlobalMatrix + (
                    self.TimeStep /
                    2.0) * self.ZeroOrderGlobalMatrix  #system matrix
                RightVector = self.fe + (2.0 / self.TimeStep) * dot(
                    self.SecondOrderGlobalMatrix, (self.pt)) + dot(
                        self.SecondOrderGlobalMatrix, (self.dpt)) - dot(
                            self.ZeroOrderGlobalMatrix,
                            (self.sumv)) - (self.TimeStep / 2.0) * dot(
                                self.ZeroOrderGlobalMatrix,
                                (self.pt))  # right hand side vector
                #The reduced (partitioned) system of equations is generated.
                RightVector[:, :] = RightVector[:, :] - dot(
                    SystemMatrix[:, self.PrescribedPressures[:, 0]],
                    self.PrescribedPressures[:, 1:])
                SystemMatrix = SystemMatrix[:, s_[self.UnknownPressures[:, 0]]]
                if SystemMatrix.shape[0] > 0.0:
                    SystemMatrix = SystemMatrix[
                        s_[self.UnknownPressures[:, 0]], :]
                RightVector = RightVector[s_[self.UnknownPressures[:, 0]], :]
                #Unknown nodal point values are solved from this system.
                #  Prescribed nodal values are inserted in the solution vector.
                Solution = solve(SystemMatrix,
                                 RightVector)  # solutions, unknown pressures
                self.p[self.UnknownPressures, 0] = Solution[:, :]
                self.p[self.PrescribedPressures[:, 0],
                       0] = self.PrescribedPressures[:, 1]
                #Calculating derivatives.
                #Calculating internal nodal flow values.
                self.dp = dot((2.0 / self.TimeStep),
                              (self.p - self.pt)) - self.dpt
                self.ddp = dot((4.0 / self.SquareTimeStep),
                               (self.p - self.pt)) - dot(
                                   (4.0 / self.TimeStep), self.dpt) - self.ddpt
                self.sumv = sumvbk + dot((self.TimeStep / 2.0),
                                         (self.pt + self.p))
                self.fi = dot(self.SecondOrderGlobalMatrix, (self.dp)) + dot(
                    self.FirstOrderGlobalMatrix,
                    (self.p)) + dot(self.ZeroOrderGlobalMatrix, (self.sumv))
                if not nonLinear:
                    break

                if self.pi is None:
                    self.pi = zeros((NumberOfGlobalDofs, 1))
                    self.pi[:, :] = self.pt[:, :]
                pI = CoeffRelax * self.p + self.pi * (1.0 - CoeffRelax)
                self.p[:, :] = pI[:, :]
                den = norm(self.pi, Inf)
                if den < 1e-12:
                    den = 1.0
                nlerr = norm(self.p - self.pi, Inf) / den

                info = {
                    'dofmap': assembler.DofMap,
                    'solution': [self.p, self.pt, self.ptt],
                    'incrementNumber': self.IncrementNumber,
                    'history': history
                }
                self.Evaluator.SetInfo(info)

                assembler.Assemble(self.SimulationContext, self.Evaluator,
                                   self.LinearZeroOrderGlobalMatrix,
                                   self.LinearFirstOrderGlobalMatrix,
                                   self.LinearSecondOrderGlobalMatrix)
                self.ZeroOrderGlobalMatrix = assembler.ZeroOrderGlobalMatrix
                self.FirstOrderGlobalMatrix = assembler.FirstOrderGlobalMatrix
                self.SecondOrderGlobalMatrix = assembler.SecondOrderGlobalMatrix

                #Dynamic nonlinear relaxing coefficient
                if counter == 100:
                    print "relaxing..."
                    print nlerr, nltol, CoeffRelax
                    counter = 0
                    self.pi[:, :] = None
                    self.sumv[:, :] = sumvbk[:, :]
                    CoeffRelax *= 0.6
                    nltol *= 0.95
                if nlerr < nltol:
                    nltol = self.nltol
                    counter = 0
                    break
                counter += 1
                self.pi[:, :] = self.p[:, :]

            self.ptt[:, :] = self.pt[:, :]
            self.pt[:, :] = self.p[:, :]
            self.dpt[:, :] = self.dp[:, :]
            self.ddpt[:, :] = self.ddp[:, :]
            self.fet[:, :] = self.fe[:, :]
            self.fit[:, :] = self.fi[:, :]
            PressuresMatrix[:, (self.IncrementNumber - 1)] = self.p[:, 0]
            history.insert(0, self.IncrementNumber)
            history = history[:3]

            if self.steady == True:
                self.MinimumIncrementNumber = 0.01 * self.NumberOfIncrements
                if norm(
                        self.fi - self.fe, Inf
                ) < self.convergence and self.IncrementNumber > self.MinimumIncrementNumber:
                    self.IncrementNumber = self.NumberOfIncrements
                else:
                    pass

            if self.IncrementNumber == ceil(0.05 * self.NumberOfIncrements):
                print "->5%"
            if self.IncrementNumber == ceil(0.25 * self.NumberOfIncrements):
                print "->25%"
            if self.IncrementNumber == ceil(0.5 * self.NumberOfIncrements):
                print "->50%"
            if self.IncrementNumber == ceil(0.70 * self.NumberOfIncrements):
                print "->70%"
            if self.IncrementNumber == ceil(0.90 * self.NumberOfIncrements):
                print "->90%"
            if self.IncrementNumber == ceil(0.99 * self.NumberOfIncrements):
                print "->99%"

            self.IncrementNumber = self.IncrementNumber + 1
            self.EndIncrementTime = self.EndIncrementTime + self.TimeStep  # increment
        info = {
            'dofmap': assembler.DofMap,
            'solution': [self.p, self.pt, self.ptt],
            'incrementNumber': self.IncrementNumber,
            'history': history,
            'allSolution': PressuresMatrix
        }
        self.Evaluator.SetInfo(info)
        self.Solutions = PressuresMatrix
        return PressuresMatrix