def cluster_vectorspace(self, vectors, trace=False):
        assert len(vectors) > 0

        # set the parameters to initial values
        dimensions = len(vectors[0])
        means = self._means
        priors = self._priors
        if not priors:
            priors = self._priors = numarray.ones(self._num_clusters,
                                        numarray.Float64) / self._num_clusters
        covariances = self._covariance_matrices 
        if not covariances:
            covariances = self._covariance_matrices = \
                [ numarray.identity(dimensions, numarray.Float64) 
                  for i in range(self._num_clusters) ]
            
        # do the E and M steps until the likelihood plateaus
        lastl = self._loglikelihood(vectors, priors, means, covariances)
        converged = False

        while not converged:
            if trace: print 'iteration; loglikelihood', lastl
            # E-step, calculate hidden variables, h[i,j]
            h = numarray.zeros((len(vectors), self._num_clusters),
                numarray.Float64)
            for i in range(len(vectors)):
                for j in range(self._num_clusters):
                    h[i,j] = priors[j] * self._gaussian(means[j],
                                               covariances[j], vectors[i])
                h[i,:] /= sum(h[i,:])

            # M-step, update parameters - cvm, p, mean
            for j in range(self._num_clusters):
                covariance_before = covariances[j]
                new_covariance = numarray.zeros((dimensions, dimensions),
                            numarray.Float64)
                new_mean = numarray.zeros(dimensions, numarray.Float64)
                sum_hj = 0.0
                for i in range(len(vectors)):
                    delta = vectors[i] - means[j]
                    new_covariance += h[i,j] * \
                        numarray.multiply.outer(delta, delta)
                    sum_hj += h[i,j]
                    new_mean += h[i,j] * vectors[i]
                covariances[j] = new_covariance / sum_hj
                means[j] = new_mean / sum_hj
                priors[j] = sum_hj / len(vectors)

                # bias term to stop covariance matrix being singular
                covariances[j] += self._bias * \
                    numarray.identity(dimensions, numarray.Float64)

            # calculate likelihood - FIXME: may be broken
            l = self._loglikelihood(vectors, priors, means, covariances)

            # check for convergence
            if abs(lastl - l) < self._conv_threshold:
                converged = True
            lastl = l
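The routine above seeds EM with uniform priors and one identity covariance per cluster, and every M-step adds a scaled identity as a singularity guard. A minimal sketch of that initialisation in modern NumPy (numarray is obsolete; np.identity plays the same role, and the names below are illustrative):

import numpy as np

def init_gmm(vectors, num_clusters):
    # Seed EM as above: uniform priors and one identity covariance per cluster.
    dim = vectors.shape[1]
    priors = np.ones(num_clusters) / num_clusters
    covariances = [np.identity(dim) for _ in range(num_clusters)]
    return priors, covariances

priors, covariances = init_gmm(np.random.rand(10, 3), 2)
# The singularity guard applied after each M-step is the same scaled identity:
#     covariances[j] += bias * np.identity(dim)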
Example #2
def powell(F,x,h=0.1,tol=1.0e-6):
    
    def f(s): return F(x + s*v)    # F in direction of v

    n = len(x)                     # Number of design variables
    df = zeros((n),type=Float64)   # Decreases of F stored here
    u = identity(n)*1.0            # Vectors v stored here by rows
    for j in range(30):            # Allow for 30 cycles:
        xOld = x.copy()            # Save starting point
        fOld = F(xOld)
      # First n line searches record decreases of F
        for i in range(n):
            v = u[i]
            a,b = bracket(f,0.0,h)
            s,fMin = search(f,a,b)
            df[i] = fOld - fMin
            fOld = fMin
            x = x + s*v
      # Last line search in the cycle    
        v = x - xOld
        a,b = bracket(f,0.0,h)
        s,fLast = search(f,a,b)
        x = x + s*v
      # Check for convergence
        if sqrt(dot(x-xOld,x-xOld)/n) < tol: return x,j+1
      # Identify biggest decrease & update search directions
        iMax = int(argmax(df))
        for i in range(iMax,n-1):
            u[i] = u[i+1]
        u[n-1] = v
    print "Powell did not converge"        
Example #3
    def cluster(self, tokens, assign_clusters=False, trace=False):
        assert chktype(1, tokens, [Token])
        assert chktype(2, assign_clusters, bool)
        assert chktype(3, trace, bool)
        assert len(tokens) > 0
        vectors = map(lambda tk: tk['FEATURES'], tokens)

        # normalise the vectors
        if self._should_normalise:
            vectors = map(self._normalise, vectors)

        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = numarray.linear_algebra.singular_value_decomposition(
                            numarray.transpose(numarray.array(vectors)))
            S = d[:self._svd_dimensions] * \
                numarray.identity(self._svd_dimensions, numarray.Float64)
            T = u[:,:self._svd_dimensions]
            Dt = vt[:self._svd_dimensions,:]
            vectors = numarray.transpose(numarray.matrixmultiply(S, Dt))
            self._Tt = numarray.transpose(T)
            
        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)

        # assign the tokens to clusters
        if assign_clusters:
            for token in tokens:
                self.classify(token)
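The SVD branch above projects the feature vectors onto their leading singular directions before handing them to cluster_vectorspace. A sketch of the same truncated-SVD reduction with modern NumPy (np.linalg.svd standing in for numarray.linear_algebra; k is the target dimensionality):

import numpy as np

def reduce_dimensions(vectors, k):
    A = np.array(vectors).T                    # columns are feature vectors, as above
    u, d, vt = np.linalg.svd(A, full_matrices=False)
    S = np.diag(d[:k])                         # top-k singular values on the diagonal
    reduced = (S @ vt[:k, :]).T                # one reduced vector per input vector
    Tt = u[:, :k].T                            # kept to project unseen vectors later
    return reduced, Tt

reduced, Tt = reduce_dimensions(np.random.rand(20, 5), 2)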
Example #4
def matInv(a):
    n = len(a[0])
    aInv = identity(n)*1.0
    a,seq = LUdecomp(a)
    for i in range(n):
        aInv[:,i] = LUsolve(a,aInv[:,i],seq)
    return aInv    
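matInv builds the inverse one column at a time: each column of identity(n) is a right-hand side e_i, and solving a*x = e_i with the cached LU factors gives the i-th column of the inverse. The same idea with SciPy's LU routines as stand-ins for LUdecomp/LUsolve (a verification sketch, not the textbook module):

import numpy as np
from scipy.linalg import lu_factor, lu_solve

def mat_inv(a):
    n = len(a)
    a_inv = np.identity(n)              # columns start out as the unit vectors e_i
    lu_piv = lu_factor(a)               # factor once, reuse for every column
    for i in range(n):
        a_inv[:, i] = lu_solve(lu_piv, a_inv[:, i])
    return a_inv

A = np.array([[4.0, 3.0], [6.0, 3.0]])
assert np.allclose(mat_inv(A) @ A, np.identity(2))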
Example #5
class _Matrix(NumArray):
    def _rc(self, a):
        if len(shape(a)) == 0:
            return a
        else:
            return Matrix(a)

    def __mul__(self, other):
        aother = asarray(other)
        if len(aother.shape) == 0:
            return self._rc(self * aother)
        else:
            return self._rc(_dot(self, aother))

    def __rmul__(self, other):
        aother = asarray(other)
        if len(aother.shape) == 0:
            return self._rc(aother * self)
        else:
            return self._rc(_dot(aother, self))

    def __imul__(self, other):
        aother = asarray(other)
        self[:] = _dot(self, aother)
        return self

    def __pow__(self, other):
        shape = self.shape
        if len(shape) != 2 or shape[0] != shape[1]:
            raise TypeError, "matrix is not square"
        if type(other) not in (type(1), type(1L)):
            raise TypeError, "exponent must be an integer"
        if other == 0:
            return Matrix(identity(shape[0]))
        if other < 0:
            result = Matrix(LinearAlgebra.inverse(self))
            x = Matrix(result)
            other = -other
        else:
            result = self
            x = result
        if other <= 3:
            while (other > 1):
                result = result * x
                other = other - 1
            return result
        # binary decomposition to reduce the number of Matrix
        #  Multiplies for other > 3.
        beta = _binary(other)
        t = len(beta)
        Z, q = x.copy(), 0
        while beta[t - q - 1] == '0':
            Z *= Z
            q += 1
        result = Z.copy()
        for k in range(q + 1, t):
            Z *= Z
            if beta[t - k - 1] == '1':
                result *= Z
        return result
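__pow__ above returns identity(shape[0]) for exponent zero and, for exponents above 3, uses a binary decomposition of the exponent so that only a logarithmic number of matrix multiplies is needed. A compact NumPy sketch of that square-and-multiply idea (negative exponents would first go through an explicit inverse, as in the class above):

import numpy as np

def mat_pow(a, k):
    # Exponentiation by squaring: walk the bits of k, squaring the base each step.
    result = np.identity(len(a))        # the k == 0 answer, as in __pow__
    base = np.array(a, dtype=float)
    while k > 0:
        if k & 1:
            result = result @ base
        base = base @ base
        k >>= 1
    return result

A = np.array([[1.0, 1.0], [0.0, 1.0]])
assert np.allclose(mat_pow(A, 5), np.linalg.matrix_power(A, 5))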
Example #8
def inverse(a):
    """inverse(a) -> inverse matrix of a
    
    *a* may be either rank-2 or rank-3. If it is rank-2, it must be square.
    
    >>> A = [[1,2,3], [3,5,5], [5,6,7]]
    >>> Ainv = inverse(A)
    >>> _isClose(na.dot(A, Ainv), na.identity(3))
    1
    
    If *a* is rank-3, it is treated as an array of rank-2 matrices and
    must be square along the last 2 axes.
    
    >>> A = [[[1, 3], [2j, 3j]],
    ...      [[2, 4], [4j, 4j]],
    ...      [[3, 5], [6j, 5j]]]
    >>> Ainv = inverse(A)
    >>> _isClose(map(na.dot, A, Ainv), [na.identity(2)]*3)
    1
    
    If *a* is not square along its last two axes, a LinAlgError is raised.
    
    >>> inverse(na.asarray(A)[...,:1])
    Traceback (most recent call last):
       ...
    LinearAlgebraError: Array (or it submatrices) must be square
    
    """
    a = na.asarray(a)
    I = na.identity(a.shape[-2])
    if len(a.shape) == 3:
        I.shape = (1,) + I.shape
    return solve_linear_equations(a, I)
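inverse() relies on the same identity trick: solving a*X = I yields X = a^-1, and for a rank-3 input a single identity is broadcast across the stack of matrices. A minimal NumPy equivalent (np.linalg.solve broadcasts over stacked matrices in the same way):

import numpy as np

def inverse_via_solve(a):
    a = np.asarray(a)
    I = np.identity(a.shape[-2])
    if a.ndim == 3:
        I = I[np.newaxis, ...]          # one identity shared by every stacked matrix
    return np.linalg.solve(a, I)

A = [[1.0, 2.0, 3.0], [3.0, 5.0, 5.0], [5.0, 6.0, 7.0]]
assert np.allclose(np.dot(A, inverse_via_solve(A)), np.identity(3))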
Example #9
def computeP(a): 
    n = len(a)
    p = identity(n)*1.0
    for k in range(n-2):
        u = a[k+1:n,k]
        h = dot(u,u)/2.0
        v = matrixmultiply(p[1:n,k+1:n],u)/h           
        p[1:n,k+1:n] = p[1:n,k+1:n] - outerproduct(v,u)
    return p
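computeP accumulates the product of Householder reflections, starting from identity(n) and subtracting a rank-one outer-product correction at each step; the accumulated matrix stays orthogonal and tridiagonalises a symmetric input. A quick property check using SciPy's Hessenberg reduction as a stand-in (for a symmetric matrix the Hessenberg form is tridiagonal and Q is accumulated from an identity in the same way):

import numpy as np
from scipy.linalg import hessenberg

A = np.array([[4.0, 1.0, 2.0],
              [1.0, 3.0, 0.0],
              [2.0, 0.0, 5.0]])
T, Q = hessenberg(A, calc_q=True)              # T tridiagonal, Q orthogonal
assert np.allclose(Q @ T @ Q.T, A)
assert np.allclose(Q.T @ Q, np.identity(3))    # Q, like p above, stays orthogonal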
Example #11
def jacobi(a, tol=1.0e-9):  # Jacobi method
    def maxElem(a):  # Find largest off-diag. element a[k,l]
        n = len(a)
        aMax = 0.0
        for i in range(n - 1):
            for j in range(i + 1, n):
                if abs(a[i, j]) >= aMax:
                    aMax = abs(a[i, j])
                    k = i
                    l = j
        return aMax, k, l

    def rotate(a, p, k, l):  # Rotate to make a[k,l] = 0
        n = len(a)
        aDiff = a[l, l] - a[k, k]
        if abs(a[k, l]) < abs(aDiff) * 1.0e-36: t = a[k, l] / aDiff
        else:
            phi = aDiff / (2.0 * a[k, l])
            t = 1.0 / (abs(phi) + sqrt(phi**2 + 1.0))
            if phi < 0.0: t = -t
        c = 1.0 / sqrt(t**2 + 1.0)
        s = t * c
        tau = s / (1.0 + c)
        temp = a[k, l]
        a[k, l] = 0.0
        a[k, k] = a[k, k] - t * temp
        a[l, l] = a[l, l] + t * temp
        for i in range(k):  # Case of i < k
            temp = a[i, k]
            a[i, k] = temp - s * (a[i, l] + tau * temp)
            a[i, l] = a[i, l] + s * (temp - tau * a[i, l])
        for i in range(k + 1, l):  # Case of k < i < l
            temp = a[k, i]
            a[k, i] = temp - s * (a[i, l] + tau * a[k, i])
            a[i, l] = a[i, l] + s * (temp - tau * a[i, l])
        for i in range(l + 1, n):  # Case of i > l
            temp = a[k, i]
            a[k, i] = temp - s * (a[l, i] + tau * temp)
            a[l, i] = a[l, i] + s * (temp - tau * a[l, i])
        for i in range(n):  # Update transformation matrix
            temp = p[i, k]
            p[i, k] = temp - s * (p[i, l] + tau * p[i, k])
            p[i, l] = p[i, l] + s * (temp - tau * p[i, l])

    n = len(a)
    maxRot = 5 * (n**2)  # Set limit on number of rotations
    p = identity(n) * 1.0  # Initialize transformation matrix
    for i in range(maxRot):  # Jacobi rotation loop
        aMax, k, l = maxElem(a)
        if aMax < tol: return diagonal(a), p
        rotate(a, p, k, l)
    print 'Jacobi method did not converge'
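jacobi() accumulates the eigenvector matrix p from identity(n), applying one plane rotation at a time until every off-diagonal entry falls below tol, and returns the diagonal as the eigenvalues. For a quick cross-check, NumPy's symmetric eigensolver gives the same spectrum (up to ordering; note jacobi modifies its input, so pass a copy):

import numpy as np

A = np.array([[4.0, 1.0, 2.0],
              [1.0, 3.0, 0.0],
              [2.0, 0.0, 5.0]])
evals, evecs = np.linalg.eigh(A)                      # reference result for jacobi(A.copy())
assert np.allclose(evecs @ np.diag(evals) @ evecs.T, A)
assert np.allclose(evecs.T @ evecs, np.identity(3))   # like p, evecs is orthogonal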
Example #13
    def setParameters(self, mu = None, sigma = None, wi = None, sigma_type = 'full', \
                      tied_sigma = False, isAdjustable = False):
        #============================================================
        # set the mean :
        # self.mean[i] = the mean for dimension i
        # self.mean.shape = (self.nvalues, q1,q2,...,qn)
        #        where qi is the size of discrete parent i
        if mu == None:
            # set all mu to zeros
            mu = na.zeros(shape=([self.nvalues]+self.discrete_parents_shape), \
                          type='Float32')
        try:
            mu = na.array(mu, shape=[self.nvalues]+self.discrete_parents_shape, \
                          type='Float32')
        except:
            raise ValueError('Could not convert mu to numarray of shape : %s, discrete parents = %s'
                             % (str(self.discrete_parents_shape),
                                str([dp.name for dp in self.discrete_parents])))
        self.mean = mu

        #============================================================
        # set the covariance :
        # self.sigma[i,j] = the covariance between dimension i and j
        # self.sigma.shape = (nvalues,nvalues,q1,q2,...,qn)
        #        where qi is the size of discrete parent i
        if sigma == None:
            eye = na.identity(self.nvalues, type = 'Float32')[...,na.NewAxis]
            if len(self.discrete_parents) > 0:
                q = reduce(lambda a,b:a*b,self.discrete_parents_shape) # number of different configurations for the parents
                sigma = na.concatenate([eye]*q, axis=2)
                sigma = na.array(sigma,shape=[self.nvalues,self.nvalues]+self.discrete_parents_shape) 
        try:
            sigma = na.array(sigma, shape=[self.nvalues,self.nvalues]+self.discrete_parents_shape, type='Float32')
        except:
            raise ValueError('Not a valid covariance matrix')
        self.sigma = sigma

        #============================================================
        # set the weights :
        # self.weights[i,j] = the regression for dimension i and continuous parent j
        # self.weights.shape = (nvalues,x1,x2,...,xn,q1,q2,...,qn)
        #        where xi is the size of continuous parent i)
        #        and qi is the size of discrete parent i
        
        if wi == None:
            wi = na.ones(shape=[self.nvalues]+self.parents_shape, type='Float32') 
        try:
            wi = na.array(wi, shape=[self.nvalues]+self.parents_shape, type='Float32')
        except:
            raise ValueError('Not a valid weight')
        self.weights = wi
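The default sigma above is one identity matrix per configuration of the discrete parents, built by stacking copies of na.identity along a new trailing axis and reshaping to (nvalues, nvalues, q1, ..., qn). The same construction in modern NumPy (an illustrative helper; the names are not from the original class):

import numpy as np

def default_sigma(nvalues, discrete_parents_shape):
    # One (nvalues x nvalues) identity per discrete-parent configuration.
    q = int(np.prod(discrete_parents_shape)) if discrete_parents_shape else 1
    eye = np.identity(nvalues)[..., np.newaxis]      # shape (nvalues, nvalues, 1)
    sigma = np.concatenate([eye] * q, axis=2)        # shape (nvalues, nvalues, q)
    return sigma.reshape([nvalues, nvalues] + list(discrete_parents_shape))

sigma = default_sigma(2, [3, 2])      # e.g. two discrete parents of sizes 3 and 2
assert sigma.shape == (2, 2, 3, 2)
assert np.allclose(sigma[:, :, 1, 0], np.identity(2))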
Example #14
    def __init__(self, v, mu = None, sigma = None, wi = None, \
                   sigma_type = 'full', tied_sigma = False, \
                   isAdjustable = True, ignoreFamily = False):

        Distribution.__init__(self, v, isAdjustable=isAdjustable, \
                              ignoreFamily=ignoreFamily)
        self.distribution_type = 'Gaussian'

        # check that current node is continuous
        if v.discrete:
            raise ValueError('Node must be continuous')

        self.discrete_parents = [parent for parent in self.parents \
                                 if parent.discrete]
        self.continuous_parents = [parent for parent in self.parents \
                                   if not parent.discrete]

        self.discrete_parents_shape = [dp.nvalues for dp \
                                       in self.discrete_parents]
        self.parents_shape = [p.nvalues for p in self.parents]
        if not self.parents_shape:
            self.parents_shape = [0]

        # set defaults
        # set all mu to zeros
        self.mean = na.zeros(shape=([self.nvalues] + \
                             self.discrete_parents_shape), type='Float32')

        # set sigma to ones along the diagonal	
        eye = na.identity(self.nvalues, type = 'Float32')[..., na.NewAxis]
        if len(self.discrete_parents) > 0:            
            q = reduce(lambda a, b:a * b, self.discrete_parents_shape) # number of different configurations for the parents
            sigma = na.concatenate([eye] * q, axis=2)
            self.sigma = na.array(sigma, shape=[self.nvalues, self.nvalues] + \
                                  self.discrete_parents_shape) 

        # set weights to ones
        self.weights = na.ones(shape=[self.nvalues] + self.parents_shape, type='Float32')

        # set the parameters : mean, sigma, weights
        self.setParameters(mu=mu, sigma=sigma, wi=wi, sigma_type=sigma_type, \
                           tied_sigma=tied_sigma, isAdjustable=isAdjustable)
Example #15
def inversePower(a,s,tol=1.0e-6):
    n = len(a)
    aStar = a - identity(n)*s   # Form [a*] = [a] - s[I]
    aStar = LUdecomp(aStar)     # Decompose [a*]
    x = zeros((n),type=Float64)
    for i in range(n):          # Seed [x] with random numbers
        x[i] = random()
    xMag = sqrt(dot(x,x))       # Normalize [x]
    x = x/xMag
    for i in range(50):         # Begin iterations      
        xOld = x.copy()         # Save current [x]
        x = LUsolve(aStar,x)    # Solve [a*][x] = [xOld]
        xMag = sqrt(dot(x,x))   # Normalize [x]
        x = x/xMag
        if dot(xOld,x) < 0.0:   # Detect change in sign of [x]
            sign = -1.0
            x = -x
        else: sign = 1.0
        if sqrt(dot(xOld - x,xOld - x)) < tol:
            return s + sign/xMag,x
    print 'Inverse power method did not converge'
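inversePower shifts the matrix by s*identity(n), LU-decomposes the shifted matrix once, and then iterates normalised solves, converging to the eigenvalue of a closest to the shift s. A compact NumPy/SciPy sketch of the same iteration (lu_factor/lu_solve as stand-ins for the textbook's LUdecomp/LUsolve):

import numpy as np
from scipy.linalg import lu_factor, lu_solve

def inverse_power(a, s, tol=1.0e-6, max_iter=50):
    n = len(a)
    lu_piv = lu_factor(a - s * np.identity(n))   # factor [a] - s[I] once
    x = np.random.rand(n)
    x /= np.linalg.norm(x)
    for _ in range(max_iter):
        x_old = x
        x = lu_solve(lu_piv, x_old)
        mag = np.linalg.norm(x)
        x /= mag
        sign = -1.0 if x_old @ x < 0.0 else 1.0  # detect a sign flip, as above
        x = sign * x
        if np.linalg.norm(x - x_old) < tol:
            return s + sign / mag, x
    raise RuntimeError('inverse power method did not converge')

A = np.array([[2.0, 1.0], [1.0, 3.0]])
val, vec = inverse_power(A, 0.5)                 # eigenvalue of A nearest 0.5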
Example #17
    def run(self):
        # material parameter
        lam = self.prms.lameLambda
        mu = self.prms.lameMu
        rho = self.prms.density
        nu = self.prms.viscosity

        # set up boundary conditions
        pres = self.prms.pressure
        shear = self.prms.shearForce

        getLogger().info("Reading mesh from " + self.getMeshFileName())
        domain = ReadMesh(self.getMeshFileName())
        impact_forces = escript.Vector(0, FunctionOnBoundary(domain))
        impact_forces.expand()
        getLogger().info("Shape = " + str(impact_forces.getShape()))
        x = FunctionOnBoundary(domain).getX()
        getLogger().info(
            "Initialising pressure and shearing boundary conditions...")
        snapDist = 0.001
        external_forces = \
            (abs(x[1]-sup(x[1]))-snapDist).whereNegative()*[-shear,-pres] \
            +                                                          \
            (abs(x[1]-inf(x[1]))-snapDist).whereNegative()*[shear,pres]

        getLogger().info("Setting up PDE...")
        mypde = LinearPDE(domain)
        mypde.setLumpingOn()
        mypde.setValue(D=rho * identity(mypde.getDim()))

        getLogger().info("Initialising solution at t=0...")
        u = Vector(0, ContinuousFunction(domain))
        u_last = Vector(0, ContinuousFunction(domain))
        v = Vector(0, ContinuousFunction(domain))
        v_last = Vector(0, ContinuousFunction(domain))
        a = Vector(0, ContinuousFunction(domain))
        a_last = Vector(0, ContinuousFunction(domain))

        # initialise iteration prms
        tend = self.prms.maxTime
        dt = self.prms.timeStepSize
        # dt=1./5.*sqrt(rho/(lam+2*mu))*Lsup(domain.getSize())
        getLogger().info("time step size = " + str(dt))
        n = 0
        t = 0

        getLogger().info("Beginning iteration...")
        while (t < tend):
            getLogger().info("Running LSM time step...")
            self.lsm.runTimeStep()

            getLogger().info("Updating impact forces from LSM...")
            self.updateImpactStresses(impact_forces)
            getLogger().info(
              "(inf(impactForces), sup(impact_forces)) = (" + \
              str(inf(impact_forces)) + ", " + str(sup(impact_forces)) + ")"
            )

            # ... update FEM ...
            getLogger().info("Initialising PDE coefficients...")
            g = grad(u)
            stress = (lam * trace(g)) * identity(
                mypde.getDim()) + mu * (g + transpose(g))
            mypde.setValue(X=-(1.0 / (1.0 + (nu * dt / 2.0))) * stress,
                           Y=-nu * v_last - (nu * dt / 2.0) * a_last,
                           y=external_forces + impact_forces)
            getLogger().info("Solving PDE...")
            a = mypde.getSolution()

            getLogger().info("Updating displacements...")
            #            u_new=2*u-u_last+dt**2*a
            #            u_last=u
            #            u=u_new

            v_new = v_last + (dt / 2.0) * (a + a_last)
            v_last = v
            v = v_new

            u_new = u_last + dt * v + ((dt**2) / 2.0) * a
            u_last = u
            u = u_new

            a_last = a

            getLogger().info("Updating LSM mesh node positions...")
            displacement = u - u_last
            self.updateLsmMeshPosn(displacement)

            t += dt
            n += 1
            getLogger().info(str(n) + "-th time step, t=" + str(t))
            getLogger().info("a=" + str(inf(a)) + ", " + str(sup(a)))
            getLogger().info("u=" + str(inf(u)) + ", " + str(sup(u)))
            getLogger().info("inf(u-u_last) = " + str(inf(displacement)))
            getLogger().info("sup(u-u_last) = " + str(sup(displacement)))

            # ... save current acceleration and displacement
            if ((self.prms.saveDxIncr > 0)
                    and ((n % self.prms.saveDxIncr) == 0)):
                u.saveDX(
                    os.path.join(
                        self.prms.outputDir,
                        "displ.{0:d}.dx".format(n // self.prms.saveDxIncr)))
Example #18
def getMaps(pt, pttype, output=False, screen=False, cyclic=True):
    jac0 = pt.labels['LC']['data'].jac0
    jac1 = pt.labels['LC']['data'].jac1
    flow = pt.labels[pttype]['flow']
    
    n = jac0.shape[0]

    # Compute jacobian times vec
    J = linalg.solve(jac1, jac0)
    if output:
        print "Jacobian J*x"
        print "------------\n"
        print J
        print "\n"
        
        print "Check Jacobian"
        print "--------------\n"
        print "   eigs = ", linalg.eig(J)[0]
        print "   eigs = ", pt.labels['LC']['data'].evals

        # Compute composition of flow maps

        print "Flow maps"
        print "---------\n"

    ntst = len(flow)/2
    maps = []
    if not cyclic:
        for i in range(ntst):
            I = identity(n)
            for j in mod(arange(i, i + ntst), ntst):
                j = int(j)
                I = linalg.solve(flow[2*j+1], matrixmultiply(flow[2*j], I))
            
            maps.append(I)
    
        # Check eigs of maps
        evals = []
        levecs = []
        revecs = []
        for m in maps:
            w, vl, vr = linalg.eig(m, left=1, right=1)
            evals.append(w)
            levecs.append(vl)
            revecs.append(vr)
            
        if output:
            for i in range(ntst):
                print evals[i]
            
        # Get left evecs along curve associated with 1 evalue
        evec1 = []
        good = []
        for i in range(ntst):
            ind = argsort(abs(evals[i]-1.))[0]
            if abs(evals[i][ind]-1) > 0.05:
                print "Bad floquet multipliers!"
            else:
                good.append(i)
            evec1.append(levecs[i][:,ind])
    else:
        # CYCLIC METHOD
        print "Similarity method!\n"
        I = identity(n)
        for i in range(ntst):
            I = linalg.solve(flow[2*i+1], matrixmultiply(flow[2*i], I))
            
        evec1 = []
        w, vl, vr = linalg.eig(I, left=1, right=1)
        print w, vl
        ind = argsort(abs(w-1.))[0]
        if abs(w[ind]-1) > 0.05:
            raise "Bad floquet multipliers!"
        else:
            v = vl[:,ind]
            print v
        
        for i in range(ntst):
            v = matrixmultiply(transpose(flow[2*i]), linalg.solve(transpose(flow[2*i+1]), v))
            evec1.append(v/linalg.norm(v))
            
        # print "Cyclic method!\n"
        # evals = []
        # levecs = []
        # revecs = []
        # C0 = zeros((n*ntst, n*ntst), Float64)
        # C1 = zeros((n*ntst, n*ntst), Float64)
        # for i in range(ntst):
            # C0[(n*i):(n*(i+1)), int(mod(n*(i+1), n*ntst)):int(mod(n*(i+2), n*ntst))] = flow[2*i]
            # C1[(n*i):(n*(i+1)), (n*i):(n*(i+1))] = flow[2*i+1]
            # 
        # w, vl, vr = linalg.eig(C0, C1, left=1, right=1)
        # print w
                
    # Same direction - if right eigenvector
    cycle = pt.labels[pttype]['cycle']
    coords = cycle.coordnames
    #for i in range(ntst):
    #    if matrixmultiply(evec1[i], (cycle[4*i+1]-cycle[4*i]).toarray()) < 0:
    #        evec1[i] = -1*evec1[i]
            
    if screen:
        x = cycle[coords[0]]
        y = cycle[coords[1]]
        pylab.plot(x,y)
        for i in good:
            a = [x[4*i], x[4*i] + 10*evec1[i][0]]
            b = [y[4*i], y[4*i] + 10*evec1[i][1]]
            pylab.plot(a, b, 'r', linewidth=1)
            pylab.plot([x[4*i]], [y[4*i]], 'gs')
            #pylab.plot([x[4*i], x[4*i] + 0.5*vhd[0]], [y[4*i], y[4*i] + 0.5*vhd[1]], 'b')
            #print evec1[i], vhd, "\n"
            #print matrixmultiply(evec1[i], vhd)
                  
    if screen:
        Je, JE = linalg.eig(jac0, jac1)
        ind = argsort(abs(Je-1.))[0]
        #if abs(Je[ind]-1) > 0.05:
        if 0:
            print "Jacobian: Bad floquet multipliers!"
        else:
            u, s, vh = linalg.svd(jac0-jac1, compute_uv=1)
            evec2 = [transpose(vh)[:,-1]]
            #evec2 = [JE[:,ind]]
             
            for i in range(ntst):
                evec2.append(linalg.solve(flow[2*i+1], matrixmultiply(flow[2*i], evec2[i])))
                 
            # Same direction
            for i in range(ntst):
                if matrixmultiply(evec2[i], (cycle[4*i+1]-cycle[4*i]).toarray()) < 0:
                    evec2[i] = -1*evec2[i]
                     
            for i in range(ntst):
                a = [x[4*i], x[4*i] + 0.5*evec2[i][0]]
                b = [y[4*i], y[4*i] + 0.5*evec2[i][1]]
                #pylab.plot(a, b, 'r', linewidth=1)
                #pylab.plot([x[4*i]], [y[4*i]], 'gs')
                                
    return J, maps, evec1
Example #19
#!/usr/bin/env python

# Generate the NetCDF Test dataset

from Scientific.IO import NetCDF
import numarray

nc = NetCDF.NetCDFFile('testdata.nc', 'w')

nc.createDimension('x', 10)
nc.createDimension('y', 10)

def funcform(x,y):
  return  (x-5)**2 + (y-5)**2

h = nc.createVariable('h', 'd', ('x','y') )
h.assignValue( numarray.fromfunction( funcform, (10,10) ) )

u = nc.createVariable('u', 'd', ('x','y') )
u.assignValue( numarray.identity(10) * 10 )

v = nc.createVariable('v', 'd', ('x','y') )
v.assignValue( numarray.ones( (10,10) ) * 5 )

nc.close()
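Reading the file back verifies what was written: h holds the paraboloid from funcform and u holds the scaled identity. A read-back sketch assuming the same ScientificPython module used above (with a current stack, netCDF4.Dataset would play the same role):

from Scientific.IO import NetCDF

nc = NetCDF.NetCDFFile('testdata.nc', 'r')
u = nc.variables['u'][:, :]   # expected: numarray.identity(10) * 10
h = nc.variables['h'][:, :]   # expected: (x-5)**2 + (y-5)**2 on a 10x10 grid
nc.close()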
Example #20
def fminPowell(func,
               x0,
               args=(),
               shouldBreak=True,
               xtol=1e-4,
               ftol=1e-4,
               xi=None,
               maxiter=None,
               maxfun=None,
               fulloutput=0,
               printmessg=1):
    """xopt,{fval,warnflag} = fmin(function, x0, args=(), xtol=1e-4, ftol=1e-4,
    maxiter=200*len(x0), maxfun=200*len(x0), fulloutput=0, printmessg=0)

    Uses a Powell's algorithm to find the minimum of function
    of one or more variables.  See Section 10.5 of Numerical Recipes in C.
    """
    p = Num.asarray(x0)
    assert (len(p.shape) == 1)
    n = len(p)
    if maxiter is None:
        maxiter = 200
    iter = 0
    if maxfun is None:
        maxfun = n * 50000
    func_calls = 0
    if xi is None:
        xi = Num.identity(n, p.typecode())

    steps = [5] * n

    fret = apply(func, (p, ) + args)
    func_calls += 1
    pt = p.copy()
    ptt = p.copy()
    xit = xi[0].copy()

    while True:
        ibig = -1
        delta = -1e20
        iter += 1
        if (iter > maxiter):
            if printmessg:
                print "Warning: Maximum number of iterations has been exceeded: ", iter
            warnflag = True
            break
        if (func_calls > maxfun):
            if printmessg:
                print "Warning: Maximum number of function evaluations has been exceeded: ", func_calls
            warnflag = True
            break
        # perform a line search in each direction
        fptt = fp = fret
        for i in range(0, n):
            fptt = fret
            alpha, fret, fc, steps[i] = line_search(func, p, xi[i], args, xtol,
                                                    ftol, 3, 0, steps[i])
            print "alpa:", alpha, "a-b/2", steps[i]
            #print __name__, "xi[i]", xi[i], "alpha: ", alpha
            print __name__, "p:", p
            p += alpha * xi[i]
            print __name__, "p:", p
            func_calls += fc
            if ((fptt - fret) > delta):
                delta = fptt - fret
                ibig = i
        if (delta < 0):
            if printmessg:
                print "Warning: All directions head up hill"
            warnflag = True
            break

        # find the total movement - and extrapolate in that direction
        #ptt = 2*p-pt

        xit = p - pt
        pt = p.copy()

        steps[ibig] = steps[0]
        xi[ibig] = xi[0]
        steps[0] = 1
        xi[0] = xit
        if not shouldBreak:
            print __name__, "Big Old Direction Vector:", xi[ibig]
            print __name__, "New Direction Vector:", xit
        storeDirectionVectors(xi)

        if (shouldBreak
                and 2.0 * abs(fp - fret) <= ftol * (abs(fp) + abs(fret))):
            if printmessg:
                print "Optimisation successful"
            warnflag = False
            break

    if printmessg:
        print "         Current function value: %f" % fret
        print "         Iterations: %d" % iter
        print "         Function evaluations: %d" % func_calls
    try:
        apply(func, (None, ))  #tell the function thread to stop
    except TypeError:
        print "Function not support None argument"

    if fulloutput:
        return p, fret, func_calls, warnflag
    else:
        return p
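This driver seeds the direction set xi with Num.identity(n) and swaps out the direction of largest decrease each cycle, much like the textbook powell in Example #2. For comparison, the same kind of minimisation with SciPy's built-in Powell implementation (scipy.optimize assumed; rosen is SciPy's Rosenbrock test function):

import numpy as np
from scipy.optimize import minimize, rosen

x0 = np.array([1.3, 0.7, 0.8])
res = minimize(rosen, x0, method='Powell', options={'xtol': 1e-4, 'ftol': 1e-4})
assert res.success
assert np.allclose(res.x, np.ones(3), atol=1e-2)   # Rosenbrock minimum is at (1, 1, 1)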
Example #22
def fminBFGS(f,
             x0,
             fprime=None,
             args=(),
             avegtol=1e-5,
             maxiter=None,
             fulloutput=0,
             printmessg=1):
    """xopt = fminBFGS(f, x0, fprime=None, args=(), avegtol=1e-5,
                       maxiter=None, fulloutput=0, printmessg=1)

    Optimize the function, f, whose gradient is given by fprime using the
    quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS)
    See Wright and Nocedal, 'Numerical Optimization', 1999, pg. 198.
    """

    app_fprime = 0
    if fprime is None:
        app_fprime = 1

    x0 = Num.asarray(x0)
    if maxiter is None:
        maxiter = len(x0) * 200
    func_calls = 0
    grad_calls = 0
    k = 0
    N = len(x0)
    gtol = N * avegtol
    I = Num.identity(N)
    Hk = I

    if app_fprime:
        gfk = apply(approx_fprime, (x0, f) + args)
        func_calls = func_calls + len(x0) + 1
    else:
        gfk = apply(fprime, (x0, ) + args)
        grad_calls = grad_calls + 1
    xk = x0
    sk = [2 * gtol]
    while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter):
        pk = -Num.dot(Hk, gfk)
        alpha_k, fc, gc = line_search_BFGS(f, xk, pk, gfk, args)
        func_calls = func_calls + fc
        xkp1 = xk + alpha_k * pk
        sk = xkp1 - xk
        xk = xkp1
        if app_fprime:
            gfkp1 = apply(approx_fprime, (xkp1, f) + args)
            func_calls = func_calls + gc + len(x0) + 1
        else:
            gfkp1 = apply(fprime, (xkp1, ) + args)
            grad_calls = grad_calls + gc + 1

        yk = gfkp1 - gfk
        k = k + 1

        rhok = 1 / Num.dot(yk, sk)
        A1 = I - sk[:, Num.NewAxis] * yk[Num.NewAxis, :] * rhok
        A2 = I - yk[:, Num.NewAxis] * sk[Num.NewAxis, :] * rhok
        Hk = Num.dot(A1, Num.dot(
            Hk, A2)) + rhok * sk[:, Num.NewAxis] * sk[Num.NewAxis, :]
        gfk = gfkp1

    if printmessg or fulloutput:
        fval = apply(f, (xk, ) + args)
    if k >= maxiter:
        warnflag = 1
        if printmessg:
            print "Warning: Maximum number of iterations has been exceeded"
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls
    else:
        warnflag = 0
        if printmessg:
            print "Optimization terminated successfully."
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls

    if fulloutput:
        return xk, fval, func_calls, grad_calls, warnflag
    else:
        return xk
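fminBFGS seeds the inverse-Hessian approximation Hk with Num.identity(N) and then applies the BFGS update Hk <- A1*Hk*A2 + rho*sk*sk^T on every iteration, as described in the Wright and Nocedal reference in the docstring. A small NumPy sketch of that single update (illustrative names; s is the step taken, y the change in gradient):

import numpy as np

def bfgs_update(H, s, y):
    # One inverse-Hessian BFGS update, mirroring the A1/A2 lines in the loop above.
    I = np.identity(len(s))
    rho = 1.0 / (y @ s)
    A1 = I - rho * np.outer(s, y)
    A2 = I - rho * np.outer(y, s)
    return A1 @ H @ A2 + rho * np.outer(s, s)

H = np.identity(2)            # Hk starts out as the identity
s = np.array([0.1, -0.2])     # xkp1 - xk
y = np.array([0.3, -0.1])     # gfkp1 - gfk
H = bfgs_update(H, s, y)
assert np.allclose(H, H.T)    # the update keeps the approximation symmetric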
Example #23
#!/usr/bin/python
## example2_14
from numarray import array,ones,identity,Float64
from LUdecomp3 import *

n = 6
d = ones((n))*2.0
e = ones((n-1))*(-1.0)
c = e.copy()
d[n-1] = 5.0
aInv = identity(n)*1.0
c,d,e = LUdecomp3(c,d,e)
for i in range(n):
    aInv[:,i] = LUsolve3(c,d,e,aInv[:,i])
print "\nThe inverse matrix is:\n",aInv
raw_input("\nPress return to exit")
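example2_14 inverts a tridiagonal matrix by solving against the columns of identity(n) with the specialised LUdecomp3/LUsolve3 routines. The same matrix and a plain NumPy check (a verification sketch, not the textbook's banded solver):

import numpy as np

n = 6
A = 2.0 * np.identity(n) - np.eye(n, k=1) - np.eye(n, k=-1)   # d = 2, c = e = -1
A[n - 1, n - 1] = 5.0                                          # same d[n-1] = 5.0 as above
A_inv = np.linalg.solve(A, np.identity(n))                     # columns of I as right-hand sides
assert np.allclose(A @ A_inv, np.identity(n))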