Example #1
def LUsolve(a,b):
    n = len(a)
    for k in range(1,n):
        b[k] = b[k] - dot(a[k,0:k],b[0:k])  
    for k in range(n-1,-1,-1):
       b[k] = (b[k] - dot(a[k,k+1:n],b[k+1:n]))/a[k,k]
    return b
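A minimal usage sketch (assuming the same numarray environment the function relies on, with a already holding the combined Doolittle LU factors of the coefficient matrix):

from numarray import array, Float64

# A = [[4, 3], [6, 3]] factored as L*U; the elimination multiplier 1.5
# is stored below the diagonal of U.
a = array([[4.0,  3.0],
           [1.5, -1.5]], type=Float64)
b = array([10.0, 12.0], type=Float64)
x = LUsolve(a, b)   # expected [1.0, 2.0], since A*[1, 2] = [10, 12]; b is overwritten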
Example #2
def perceptron(x, y, n, debug=0):
    """implements a perceptron
    
    as defined in "Support Vector Machines", Cristiani, p.12
    @x is a list of i 'numarray.array's
    @y is a list of i correct outputs for x
    @n is the learning rate 0 < n < 1"""
    w = na.zeros(x[0].shape[0], na.Float32)  #weights
    b = 0  #bias
    k = 0  #error count
    R = max_norm(x)**2
    if (debug): print "R= ", R
    iteration = 0  #number of iterations
    mistake_free = 0
    while mistake_free == 0 and iteration < 100:
        iteration += 1
        if (debug): print "iteration #", iteration
        mistake_free = 1
        for i in range(len(x)):
            input_vec = x[i]
            expected_out = y[i]
            actual_out = y[i] * (na.dot(w, x[i]) + b)
            if actual_out <= 0:
                w += n * y[i] * x[i]
                if (debug): print n, y[i], x[i], w, b
                b += n + (y[i] - (na.dot(w, x[i]) + b))  # * R
                k += 1
                mistake_free = 0
    return (w, b, k)
Example #3
def perceptron(x, y, n, debug = 0):
    """implements a perceptron
    
    as defined in "Support Vector Machines", Cristiani, p.12
    @x is a list of i 'numarray.array's
    @y is a list of i correct outputs for x
    @n is the learning rate 0 < n < 1"""
    w = na.zeros(x[0].shape[0], na.Float32) #weights
    b = 0                                   #bias
    k = 0                                   #error count
    R = max_norm(x) ** 2
    if(debug): print "R= ", R
    iteration = 0                           #number of iterations
    mistake_free = 0
    while mistake_free == 0 and iteration < 100:
        iteration += 1
        if(debug): print "iteration #", iteration
        mistake_free = 1
        for i in range(len(x)):
            input_vec = x[i]
            expected_out = y[i]
            actual_out = y[i]*(na.dot(w, x[i]) + b)
            if actual_out <= 0:
                w += n * y[i] * x[i]
                if(debug): print n, y[i], x[i], w, b
                b += n + (y[i] - (na.dot(w, x[i]) + b)) # * R
                k += 1
                mistake_free = 0
    return (w, b, k)
Example #4
def inversePower3(d, c, s, tol=1.0e-6):
    n = len(d)
    e = c.copy()
    cc = c.copy()  # Save original [c]
    dStar = d - s  # Form [A*] = [A] - s[I]
    LUdecomp3(cc, dStar, e)  # Decompose [A*]
    x = zeros((n), type=Float64)
    for i in range(n):  # Seed [x] with random numbers
        x[i] = random()
    xMag = sqrt(dot(x, x))  # Normalize [x]
    x = x / xMag
    flag = 0
    for i in range(30):  # Begin iterations
        xOld = x.copy()  # Save current [x]
        LUsolve3(cc, dStar, e, x)  # Solve [A*][x] = [xOld]
        xMag = sqrt(dot(x, x))  # Normalize [x]
        x = x / xMag
        if dot(xOld, x) < 0.0:  # Detect change in sign of [x]
            sign = -1.0
            x = -x
        else:
            sign = 1.0
        if sqrt(dot(xOld - x, xOld - x)) < tol:
            return s + sign / xMag, x
    print "Inverse power method did not converge"
Example #5
def inversePower3(d, c, s, tol=1.0e-6):
    n = len(d)
    e = c.copy()
    cc = c.copy()  # Save original [c]
    dStar = d - s  # Form [A*] = [A] - s[I]
    LUdecomp3(cc, dStar, e)  # Decompose [A*]
    x = zeros((n), type=Float64)
    for i in range(n):  # Seed [x] with random numbers
        x[i] = random()
    xMag = sqrt(dot(x, x))  # Normalize [x]
    x = x / xMag
    flag = 0
    for i in range(30):  # Begin iterations
        xOld = x.copy()  # Save current [x]
        LUsolve3(cc, dStar, e, x)  # Solve [A*][x] = [xOld]
        xMag = sqrt(dot(x, x))  # Normalize [x]
        x = x / xMag
        if dot(xOld, x) < 0.0:  # Detect change in sign of [x]
            sign = -1.0
            x = -x
        else:
            sign = 1.0
        if sqrt(dot(xOld - x, xOld - x)) < tol:
            return s + sign / xMag, x
    print 'Inverse power method did not converge'
Example #6
def LUsolve(a, b):
    n = len(a)
    for k in range(1, n):
        b[k] = b[k] - dot(a[k, 0:k], b[0:k])
    for k in range(n - 1, -1, -1):
        b[k] = (b[k] - dot(a[k, k + 1:n], b[k + 1:n])) / a[k, k]
    return b
Example #7
 def map(x,y,s,t):
     N = zeros((4),type=Float64)
     N[0] = (1.0 - s)*(1.0 - t)/4.0
     N[1] = (1.0 + s)*(1.0 - t)/4.0
     N[2] = (1.0 + s)*(1.0 + t)/4.0
     N[3] = (1.0 - s)*(1.0 + t)/4.0
     xCoord = dot(N,x)
     yCoord = dot(N,y)
     return xCoord,yCoord
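A hypothetical call for a single quadrilateral element, assuming the function is reachable at module level and that zeros, dot and Float64 from numarray are in scope; at the element centre (s = t = 0) the mapping returns the average of the corner coordinates:

from numarray import array, Float64

x = array([0.0, 2.0, 2.0, 0.0], type=Float64)   # corner x-coordinates
y = array([0.0, 0.0, 1.0, 1.0], type=Float64)   # corner y-coordinates
xc, yc = map(x, y, 0.0, 0.0)                    # expected (1.0, 0.5)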
Example #8
def choleski(a):
    n = len(a)
    for k in range(n):
        try:
            a[k,k] = sqrt(a[k,k] - dot(a[k,0:k],a[k,0:k]))
        except ValueError:
            error.err('Matrix is not positive definite')
        for i in range(k+1,n):
            a[i,k] = (a[i,k] - dot(a[i,0:k],a[k,0:k]))/a[k,k]
    for k in range(1,n): a[0:k,k] = 0.0
    return a
Example #9
def choleski(a):
    n = len(a)
    for k in range(n):
        try:
            a[k, k] = sqrt(a[k, k] - dot(a[k, 0:k], a[k, 0:k]))
        except ValueError:
            error.err('Matrix is not positive definite')
        for i in range(k + 1, n):
            a[i, k] = (a[i, k] - dot(a[i, 0:k], a[k, 0:k])) / a[k, k]
    for k in range(1, n):
        a[0:k, k] = 0.0
    return a
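A small usage sketch (assumptions: sqrt and dot from numarray are in scope, and the error module referenced in the except branch is importable). The routine overwrites a with the lower-triangular Cholesky factor L, so that A = L*L^T:

from numarray import array, Float64

a = array([[ 4.0, -2.0],
           [-2.0,  2.0]], type=Float64)
L = choleski(a)     # expected [[2.0, 0.0], [-1.0, 1.0]]; a is modified in place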
Example #10
def regress(X):
    """
    Apply a linear regression on X.

    X is an numarray array of form [Y|X].
    We split this up, do the regression and return the
    regression coefficients, beta_i.
    """
    X = N.array(X)
    Y = X[:,0].copy()
    X[:,0] = 1.0
    a = N.dot(NL.inverse(N.dot(N.transpose(X), X)), N.transpose(X))
    return N.dot(a, Y)
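A hypothetical call, assuming N is numarray and NL is numarray.linear_algebra, as the prefixes inside regress suggest. With exact data y = 2 + 3*x the returned coefficients are the intercept and the slope:

import numarray as N
import numarray.linear_algebra as NL   # assumed aliases

data = [[2.0, 0.0],     # each row is [Y, x]
        [5.0, 1.0],
        [8.0, 2.0]]
beta = regress(data)    # expected approx [2.0, 3.0]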
Example #11
def LUsolve(a,b,seq):
    n = len(a)
    
  # Rearrange constant vector; store it in [x]
    x = b.copy()
    for i in range(n):
        x[i] = b[seq[i]]
        
  # Solution
    for k in range(1,n):
        x[k] = x[k] - dot(a[k,0:k],x[0:k])  
    for k in range(n-1,-1,-1):
       x[k] = (x[k] - dot(a[k,k+1:n],x[k+1:n]))/a[k,k]
    return x
Example #12
def LUsolve(a, b, seq):
    n = len(a)

    # Rearrange constant vector; store it in [x]
    x = b.copy()
    for i in range(n):
        x[i] = b[seq[i]]

    # Solution
    for k in range(1, n):
        x[k] = x[k] - dot(a[k, 0:k], x[0:k])
    for k in range(n - 1, -1, -1):
        x[k] = (x[k] - dot(a[k, k + 1 : n], x[k + 1 : n])) / a[k, k]
    return x
Example #13
def conjGrad(Av,x,b,tol=1.0e-9):
    n = len(b)
    r = b - Av(x)
    s = r.copy()
    for i in range(n):
        u = Av(s)
        alpha = dot(s,r)/dot(s,u)
        x = x + alpha*s
        r = b - Av(x)
        if(sqrt(dot(r,r))) < tol:
            break
        else:
            beta = -dot(r,u)/dot(s,u)
            s = r + beta*s
    return x,i
Example #14
def householder(a):
    n = len(a)
    for k in range(n - 2):
        u = a[k + 1:n, k]
        uMag = sqrt(dot(u, u))
        if u[0] < 0.0: uMag = -uMag
        u[0] = u[0] + uMag
        h = dot(u, u) / 2.0
        v = matrixmultiply(a[k + 1:n, k + 1:n], u) / h
        g = dot(u, v) / (2.0 * h)
        v = v - g * u
        a[k+1:n,k+1:n] = a[k+1:n,k+1:n] - outerproduct(v,u) \
                         - outerproduct(u,v)
        a[k, k + 1] = -uMag
    return diagonal(a), diagonal(a, 1)
Example #15
def conjGrad(Av, x, b, tol=1.0e-9):
    n = len(b)
    r = b - Av(x)
    s = r.copy()
    for i in range(n):
        u = Av(s)
        alpha = dot(s, r) / dot(s, u)
        x = x + alpha * s
        r = b - Av(x)
        if (sqrt(dot(r, r))) < tol:
            break
        else:
            beta = -dot(r, u) / dot(s, u)
            s = r + beta * s
    return x, i
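A usage sketch of the matrix-free interface: Av is any callable returning the product of the symmetric positive definite coefficient matrix with a vector, so conjGrad never needs the matrix itself (assuming dot and sqrt from numarray are in scope):

from numarray import array, zeros, dot, Float64

A = array([[4.0, 1.0],
           [1.0, 3.0]], type=Float64)
b = array([1.0, 2.0], type=Float64)

def Av(v):                          # matrix-vector product callback
    return dot(A, v)

x0 = zeros((2,), type=Float64)
x, numIter = conjGrad(Av, x0, b)    # expected x approx [0.0909, 0.6364]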
Example #16
def householder(a): 
    n = len(a)
    for k in range(n-2):
        u = a[k+1:n,k]
        uMag = sqrt(dot(u,u))
        if u[0] < 0.0: uMag = -uMag
        u[0] = u[0] + uMag
        h = dot(u,u)/2.0
        v = matrixmultiply(a[k+1:n,k+1:n],u)/h
        g = dot(u,v)/(2.0*h)
        v = v - g*u
        a[k+1:n,k+1:n] = a[k+1:n,k+1:n] - outerproduct(v,u) \
                         - outerproduct(u,v)
        a[k,k+1] = -uMag
    return diagonal(a),diagonal(a,1)
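A sketch of a call, assuming the numarray names used above (dot, sqrt, matrixmultiply, outerproduct, diagonal) are imported. The symmetric input is reduced to tridiagonal form in place and the two diagonals are returned:

from numarray import array, Float64

a = array([[ 7.0,  2.0,  3.0, -1.0],
           [ 2.0,  8.0,  5.0,  1.0],
           [ 3.0,  5.0, 12.0,  9.0],
           [-1.0,  1.0,  9.0,  7.0]], type=Float64)
d, c = householder(a)   # d: main diagonal, c: super-diagonal of the tridiagonal form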
Example #17
def line_search(f, fprime, xk, pk, gfk, args=(), c1=1e-4, c2=0.9, amax=50):
    """alpha, fc, gc = line_search(f, xk, pk, gfk,
                                   args=(), c1=1e-4, c2=0.9, amax=1)

    minimize the function f(xk+alpha pk) using the line search algorithm of
    Wright and Nocedal in 'Numerical Optimization', 1999, pg. 59-60
    """

    fc = 0
    gc = 0
    alpha0 = 1.0
    phi0 = apply(f, (xk, ) + args)
    phi_a0 = apply(f, (xk + alpha0 * pk, ) + args)
    fc = fc + 2
    derphi0 = Num.dot(gfk, pk)
    derphi_a0 = Num.dot(apply(fprime, (xk + alpha0 * pk, ) + args), pk)
    gc = gc + 1

    # check to see if alpha0 = 1 satisfies Strong Wolfe conditions.
    if (phi_a0 <= phi0 + c1*alpha0*derphi0) \
       and (abs(derphi_a0) <= c2*abs(derphi0)):
        return alpha0, fc, gc

    alpha0 = 0
    alpha1 = 1
    phi_a1 = phi_a0
    phi_a0 = phi0

    i = 1
    while 1:
        if (phi_a1 > phi0 + c1*alpha1*derphi0) or \
           ((phi_a1 >= phi_a0) and (i > 1)):
            return zoom(alpha0, alpha1)

        derphi_a1 = Num.dot(apply(fprime, (xk + alpha1 * pk, ) + args), pk)
        gc = gc + 1
        if (abs(derphi_a1) <= -c2 * derphi0):
            return alpha1

        if (derphi_a1 >= 0):
            return zoom(alpha1, alpha0)

        alpha2 = (amax - alpha1) * 0.25 + alpha1
        i = i + 1
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = apply(f, (xk + alpha1 * pk, ) + args)
Example #18
 def invert(L):  # Inverts lower triangular matrix L
     n = len(L)
     for j in range(n - 1):
         L[j, j] = 1.0 / L[j, j]
         for i in range(j + 1, n):
             L[i, j] = -dot(L[i, j:i], L[j:i, j]) / L[i, i]
     L[n - 1, n - 1] = 1.0 / L[n - 1, n - 1]
Example #19
def perceptron(x, y, n, debug=0):
    """implements a perceptron
    
    modified from "Support Vector Machines", Cristiani, p.12
    @x is a list of i 'numarray.array's
    @y is a list of i correct outputs for x
    @n is the learning rate 0 < n < 1
    @debug is 1 for debugging, 0 for silent"""
    w = na.zeros(x[0].shape[0], na.Float32)  #weights
    b = 0  #bias
    k = 0  #error count
    iteration = 0  #number of iterations
    mistake_free = 0
    while not mistake_free and iteration < 100:
        iteration += 1
        if (debug): print "iteration #", iteration
        mistake_free = 1
        for i in range(len(x)):
            actual_out = na.dot(w, x[i]) + b  #<w*x> + b
            if y[i] * actual_out <= 0:
                w += n * y[i] * x[i]
                if (debug): print n, y[i], x[i], w, b
                b += y[i] - actual_out
                k += 1
                mistake_free = 0
    return (w, b, k)
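A hypothetical training run on a linearly separable toy problem (logical AND with -1/+1 labels), assuming na is numarray as the prefixes suggest:

import numarray as na

x = [na.array([0.0, 0.0], type=na.Float32), na.array([0.0, 1.0], type=na.Float32),
     na.array([1.0, 0.0], type=na.Float32), na.array([1.0, 1.0], type=na.Float32)]
y = [-1, -1, -1, 1]                 # AND of the two inputs
w, b, k = perceptron(x, y, 0.2)     # w, b separate the classes; k counts corrective updates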
Example #20
def _simple_logistic_regression(x,y,beta_start=None,verbose=False,
                               CONV_THRESH=1.e-3,MAXIT=500):
    """
 Faster than logistic_regression when there is only one predictor.
    """
    if len(x) != len(y):
        raise ValueError, "x and y should be the same length!"
    if beta_start is None:
        beta_start = NA.zeros(2,x.dtype.char)
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta 
        p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likelihood
        s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)])  # scoring function
        # information matrix
        J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],
                          [NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])
        beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH: break
        iter = iter + 1
    return beta, J_bar, l
Example #21
def _simple_logistic_regression(x,y,beta_start=None,verbose=False,
                               CONV_THRESH=1.e-3,MAXIT=500):
    """
 Faster than logistic_regression when there is only one predictor.
    """
    if len(x) != len(y):
        raise ValueError, "x and y should be the same length!"
    if beta_start is None:
        beta_start = NA.zeros(2,x.typecode())
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta 
        p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likelihood
        s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)])  # scoring function
        # information matrix
        J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],
                          [NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])
        beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH: break
        iter = iter + 1
    return beta, J_bar, l
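A hypothetical fit on six observations, assuming NA is numarray and LA is numarray.linear_algebra, matching the prefixes used inside the function:

import numarray as NA

x = NA.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
y = NA.array([0.0, 0.0, 1.0, 0.0, 1.0, 1.0])
beta, J_bar, l = _simple_logistic_regression(x, y, verbose=True)
# beta[0], beta[1]: fitted intercept and slope of the logit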
Example #22
def dual_perceptron(x, y):
    """implements a dual form perceptron
    
    as defined in "Support Vector Machines", Cristiani, p.18
    @x is a list of i 'numarray.array's that define the input
    @y is a list of i correct outputs for x, must be synchronized with x
        i.e. y[3] == correct f(x[3])
    @n is the learning rate 0 < n < 1"""
    a = na.zeros(len(x), na.Float32)     #embedding strength = alpha
    b = 0                                       #bias
    R = math.pow(max_norm(x), 2)
    mistake_free = 0
    iteration = 0
    while mistake_free == 0 and iteration < 50:
        iteration += 1
        print "iteration #", iteration
        mistake_free = 1
        for i in range(len(x)):
            sum_lc = 0
            for j in range(len(x)):
                sum_lc += a[j] * y[j] * na.dot(x[j], x[i]) #+ b
            print sum_lc, a, y, x, b
            if y[i] * (sum_lc + b) <= 0:
                print b, a, sum_lc, y[i]
                a[i] += 1
                b += y[i] * R
                mistake_free = 0
    return (a, b)
Example #23
def perceptron(x, y, n, debug = 0):
    """implements a perceptron
    
    modified from "Support Vector Machines", Cristiani, p.12
    @x is a list of i 'numarray.array's
    @y is a list of i correct outputs for x
    @n is the learning rate 0 < n < 1
    @debug is 1 for debugging, 0 for silent"""
    w = na.zeros(x[0].shape[0], na.Float32) #weights
    b = 0                                   #bias
    k = 0                                   #error count
    iteration = 0                           #number of iterations
    mistake_free = 0
    while not mistake_free and iteration < 100:
        iteration += 1
        if(debug): print "iteration #", iteration
        mistake_free = 1
        for i in range(len(x)):
            actual_out = na.dot(w, x[i]) + b #<w*x> + b
            if y[i] * actual_out <= 0:
                w += n * y[i] * x[i]
                if(debug): print n, y[i], x[i], w, b
                b += y[i] - actual_out
                k += 1
                mistake_free = 0
    return (w, b, k)
Example #24
def powell(F, x, h=0.1, tol=1.0e-6):
    def f(s):
        return F(x + s * v)  # F in direction of v

    n = len(x)  # Number of design variables
    df = zeros((n), type=Float64)  # Decreases of F stored here
    u = identity(n) * 1.0  # Vectors v stored here by rows
    for j in range(30):  # Allow for 30 cycles:
        xOld = x.copy()  # Save starting point
        fOld = F(xOld)
        # First n line searches record decreases of F
        for i in range(n):
            v = u[i]
            a, b = bracket(f, 0.0, h)
            s, fMin = search(f, a, b)
            df[i] = fOld - fMin
            fOld = fMin
            x = x + s * v
    # Last line search in the cycle
        v = x - xOld
        a, b = bracket(f, 0.0, h)
        s, fLast = search(f, a, b)
        x = x + s * v
        # Check for convergence
        if sqrt(dot(x - xOld, x - xOld) / n) < tol: return x, j + 1
        # Identify biggest decrease & update search directions
        iMax = int(argmax(df))
        for i in range(iMax, n - 1):
            u[i] = u[i + 1]
            u[n - 1] = v
    print "Powell did not converge"
Example #25
def powell(F,x,h=0.1,tol=1.0e-6):
    
    def f(s): return F(x + s*v)    # F in direction of v

    n = len(x)                     # Number of design variables
    df = zeros((n),type=Float64)   # Decreases of F stored here
    u = identity(n)*1.0            # Vectors v stored here by rows
    for j in range(30):            # Allow for 30 cycles:
        xOld = x.copy()            # Save starting point
        fOld = F(xOld)
      # First n line searches record decreases of F
        for i in range(n):
            v = u[i]
            a,b = bracket(f,0.0,h)
            s,fMin = search(f,a,b)
            df[i] = fOld - fMin
            fOld = fMin
            x = x + s*v
      # Last line search in the cycle    
        v = x - xOld
        a,b = bracket(f,0.0,h)
        s,fLast = search(f,a,b)
        x = x + s*v
      # Check for convergence
        if sqrt(dot(x-xOld,x-xOld)/n) < tol: return x,j+1
      # Identify biggest decrease & update search directions
        iMax = int(argmax(df))
        for i in range(iMax,n-1):
            u[i] = u[i+1]
            u[n-1] = v
    print "Powell did not converge"        
Example #26
 def invert(L):  # Inverts lower triangular matrix L
     n = len(L)
     for j in range(n - 1):
         L[j, j] = 1.0 / L[j, j]
         for i in range(j + 1, n):
             L[i, j] = -dot(L[i, j:i], L[j:i, j]) / L[i, i]
     L[n - 1, n - 1] = 1.0 / L[n - 1, n - 1]
Example #27
def dual_perceptron(x, y):
    """implements a dual form perceptron
    
    as defined in "Support Vector Machines", Cristiani, p.18
    @x is a list of i 'numarray.array's that define the input
    @y is a list of i correct outputs for x, must be synchronized with x
        i.e. y[3] == correct f(x[3])
    @n is the learning rate 0 < n < 1"""
    a = na.zeros(len(x), na.Float32)  #embedding strength = alpha
    b = 0  #bias
    R = math.pow(max_norm(x), 2)
    mistake_free = 0
    iteration = 0
    while mistake_free == 0 and iteration < 50:
        iteration += 1
        print "iteration #", iteration
        mistake_free = 1
        for i in range(len(x)):
            sum_lc = 0
            for j in range(len(x)):
                sum_lc += a[j] * y[j] * na.dot(x[j], x[i])  #+ b
            print sum_lc, a, y, x, b
            if y[i] * (sum_lc + b) <= 0:
                print b, a, sum_lc, y[i]
                a[i] += 1
                b += y[i] * R
                mistake_free = 0
    return (a, b)
Example #28
 def __mul__(self, other):
     aother = asarray(other)
     #if len(aother.shape) == 0:
     #    return self._rc(self*aother)
     #else:
     #    return self._rc(dot(self, aother))
     #return self._rc(dot(self, aother))
     return dot(self, aother)
Example #29
 def __mul__(self, other):
     aother = asarray(other)
     #if len(aother.shape) == 0:
     #    return self._rc(self*aother)
     #else:
     #    return self._rc(dot(self, aother))
     #return self._rc(dot(self, aother))
     return dot(self, aother)
Example #30
 def getWeights(sample):
     # Evaluate PDFs for this sample.
     pds = numarray.array([ f(sample) for f in pdfs ])
     # Compute an array of the weights.
     denominator = numarray.dot(nums, pds)
     if denominator != 0:
         return numarray.matrixmultiply(V, pds) / denominator
     else:
         return numarray.zeros((num_categories, ), "Float32")
Example #31
def computeP(a): 
    n = len(a)
    p = identity(n)*1.0
    for k in range(n-2):
        u = a[k+1:n,k]
        h = dot(u,u)/2.0
        v = matrixmultiply(p[1:n,k+1:n],u)/h           
        p[1:n,k+1:n] = p[1:n,k+1:n] - outerproduct(v,u)
    return p
Example #32
def computeP(a):
    n = len(a)
    p = identity(n) * 1.0
    for k in range(n - 2):
        u = a[k + 1:n, k]
        h = dot(u, u) / 2.0
        v = matrixmultiply(p[1:n, k + 1:n], u) / h
        p[1:n, k + 1:n] = p[1:n, k + 1:n] - outerproduct(v, u)
    return p
Example #33
def findmec(th0, th1):
    delta = 1e-3
    if th0 < 0 and th0 - th1 < .1:
        sm = 5.
        sp = 6.
    else:
        sm = 3.
        sp = 5.
    params = [sm, sp]
    lasterr = 1e6
    for i in range(25):
        sm, sp = params
        ath0, ath1 = trymec(sm, sp)
        c1c, c2c = th0 - ath0, th1 - ath1

        err = c1c * c1c + c2c * c2c
        if 0:
            print '%findmec', sm, sp, ath0, ath1, err
            sys.stdout.flush()

        if err < 1e-9:
            return params
        if err > lasterr:
            return None
        lasterr = err


        dc1s = []
        dc2s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            sm, sp = params1
            ath0, ath1 = trymec(sm, sp)
            c1p, c2p = th0 - ath0, th1 - ath1

            params1 = N.array(params)
            params1[j] -= delta
            sm, sp = params1
            ath0, ath1 = trymec(sm, sp)
            c1m, c2m = th0 - ath0, th1 - ath1

            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))

        jm = N.array([dc1s, dc2s])
        ji = la.inverse(jm)
        dp = N.dot(ji, [c1c, c2c])

        if i < 4:
            scale = .5
        else:
            scale = 1
        params -= scale * dp
        if params[0] < 0: params[0] = 0.
    return params
Example #34
def findmec(th0, th1):
    delta = 1e-3
    if th0 < 0 and th0 - th1 < .1:
        sm = 5.
        sp = 6.
    else:
        sm = 3.
        sp = 5.
    params = [sm, sp]
    lasterr = 1e6
    for i in range(25):
        sm, sp = params
        ath0, ath1 = trymec(sm, sp)
        c1c, c2c = th0 - ath0, th1 - ath1

        err = c1c * c1c + c2c * c2c
        if 0:
            print '%findmec', sm, sp, ath0, ath1, err
            sys.stdout.flush()

        if err < 1e-9:
            return params
        if err > lasterr:
            return None
        lasterr = err

        dc1s = []
        dc2s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            sm, sp = params1
            ath0, ath1 = trymec(sm, sp)
            c1p, c2p = th0 - ath0, th1 - ath1

            params1 = N.array(params)
            params1[j] -= delta
            sm, sp = params1
            ath0, ath1 = trymec(sm, sp)
            c1m, c2m = th0 - ath0, th1 - ath1

            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))

        jm = N.array([dc1s, dc2s])
        ji = la.inverse(jm)
        dp = N.dot(ji, [c1c, c2c])

        if i < 4:
            scale = .5
        else:
            scale = 1
        params -= scale * dp
        if params[0] < 0: params[0] = 0.
    return params
Example #35
def line_search_BFGS(f, xk, pk, gfk, args=(), c1=1e-4, alpha0=1):
    """alpha, fc, gc = line_search(f, xk, pk, gfk,
                                   args=(), c1=1e-4, alpha0=1)

    minimize over alpha, the function f(xk+alpha pk) using the interpolation
    algorithm (Armijo backtracking) as suggested by
    Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57
    """

    fc = 0
    phi0 = apply(f, (xk, ) + args)  # compute f(xk)
    phi_a0 = apply(f, (xk + alpha0 * pk, ) + args)  # compute f
    fc = fc + 2
    derphi0 = Num.dot(gfk, pk)

    if (phi_a0 <= phi0 + c1 * alpha0 * derphi0):
        return alpha0, fc, 0

    # Otherwise compute the minimizer of a quadratic interpolant:

    alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = apply(f, (xk + alpha1 * pk, ) + args)
    fc = fc + 1

    if (phi_a1 <= phi0 + c1 * alpha1 * derphi0):
        return alpha1, fc, 0

    # Otherwise loop with cubic interpolation until we find an alpha which satisfies
    #  the first Wolfe condition (since we are backtracking, we will assume that
    #  the value of alpha is not too small and satisfies the second condition).

    while 1:  # we are assuming pk is a descent direction
        factor = alpha0**2 * alpha1**2 * (alpha1 - alpha0)
        a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
            alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
        a = a / factor
        b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
            alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
        b = b / factor

        alpha2 = (-b + Num.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0 * a)
        phi_a2 = apply(f, (xk + alpha2 * pk, ) + args)
        fc = fc + 1

        if (phi_a2 <= phi0 + c1 * alpha2 * derphi0):
            return alpha2, fc, 0

        if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2 / alpha1) < 0.96:
            alpha2 = alpha1 / 2.0

        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi_a2
Example #36
def optimize(F, gradF, x, h=0.1, tol=1.0e-6):
    def f(s):
        return F(x + s * v)  # Line function along v

    n = len(x)
    g0 = -gradF(x)
    v = g0.copy()
    F0 = F(x)
    for i in range(200):
        a, b = bracket(f, 0.0, h)  # Minimization along
        s, fMin = search(f, a, b)  # a line
        x = x + s * v
        F1 = F(x)
        g1 = -gradF(x)
        if (sqrt(dot(g1, g1)) <= tol) or (abs(F0 - F1) < tol):
            return x, i + 1
        gamma = dot((g1 - g0), g1) / dot(g0, g0)
        v = g1 + gamma * v
        g0 = g1.copy()
        F0 = F1
    print "fletcherReeves did not converge"
Example #37
def newtonRaphson2(f, x, tol=1.0e-9):
    def jacobian(f, x):
        h = 1.0e-4
        n = len(x)
        jac = zeros((n, n), type=Float64)
        f0 = f(x)
        for i in range(n):
            temp = x[i]
            x[i] = temp + h
            f1 = f(x)
            x[i] = temp
            jac[:, i] = (f1 - f0) / h
        return jac, f0

    for i in range(30):
        jac, f0 = jacobian(f, x)
        if sqrt(dot(f0, f0) / len(x)) < tol: return x
        dx = gaussPivot(jac, -f0)
        x = x + dx
        if sqrt(dot(dx, dx)) < tol * max(max(abs(x)), 1.0): return x
    print 'Too many iterations'
Example #38
def inversePower5(Bv,d,e,f,tol=1.0e-6):  
    n = len(d)
    d,e,f = LUdecomp5(d,e,f)
    x = zeros((n),type=Float64)
    for i in range(n):          # Seed {v} with random numbers
        x[i] = random()
    xMag = sqrt(dot(x,x))       # Normalize {v}
    x = x/xMag
    for i in range(30):         # Begin iterations     
        xOld = x.copy()         # Save current {v}
        x = Bv(xOld)            # Compute [B]{v}
        x = LUsolve5(d,e,f,x)   # Solve [A]{z} = [B]{v}
        xMag = sqrt(dot(x,x))   # Normalize {z}
        x = x/xMag
        if dot(xOld,x) < 0.0:   # Detect change in sign of {x}
            sign = -1.0
            x = -x
        else: sign = 1.0
        if sqrt(dot(xOld - x,xOld - x)) < tol:
            return sign/xMag,x
    print 'Inverse power method did not converge'
Example #39
def response(H, f, nmode, T):
    R = 0.0019861376  # kcal/mol
    beta = 1. / (R * T)
    evals, evecs = numarray.linear_algebra.Heigenvectors(H)
    dim = len(evecs[0])
    dr = numarray.zeros(dim, numarray.Float32)
    for m in range(6, 6 + nmode):
        dot = numarray.dot(evecs[m], f)
        print dot, evals[m]
        # TODO!!! CHECK THIS AMPLITUDE FACTOR
        dr += math.sqrt(beta / evals[m]) * dot * evecs[m]
    return dr
Example #40
def inversePower(a,s,tol=1.0e-6):
    n = len(a)
    aStar = a - identity(n)*s   # Form [a*] = [a] - s[I]
    aStar = LUdecomp(aStar)     # Decompose [a*]
    x = zeros((n),type=Float64)
    for i in range(n):          # Seed [x] with random numbers
        x[i] = random()
    xMag = sqrt(dot(x,x))       # Normalize [x]
    x =x/xMag
    for i in range(50):         # Begin iterations      
        xOld = x.copy()         # Save current [x]
        x = LUsolve(aStar,x)    # Solve [a*][x] = [xOld]
        xMag = sqrt(dot(x,x))   # Normalize [x]
        x = x/xMag
        if dot(xOld,x) < 0.0:   # Detect change in sign of [x]
            sign = -1.0
            x = -x
        else: sign = 1.0
        if sqrt(dot(xOld - x,xOld - x)) < tol:
            return s + sign/xMag,x
    print 'Inverse power method did not converge'
Example #41
def optimize(F, gradF, x, h=0.1, tol=1.0e-6):
    def f(s):
        return F(x + s * v)  # Line function along v

    n = len(x)
    g0 = -gradF(x)
    v = g0.copy()
    F0 = F(x)
    for i in range(200):
        a, b = bracket(f, 0.0, h)  # Minimization along
        s, fMin = search(f, a, b)  # a line
        x = x + s * v
        F1 = F(x)
        g1 = -gradF(x)
        if (sqrt(dot(g1, g1)) <= tol) or (abs(F0 - F1) < tol):
            return x, i + 1
        gamma = dot((g1 - g0), g1) / dot(g0, g0)
        v = g1 + gamma * v
        g0 = g1.copy()
        F0 = F1
    print "fletcherReeves did not converge"
Example #42
def gaussElimin(a,b):
    n = len(b)
  # Elimination Phase
    for k in range(0,n-1):
        for i in range(k+1,n):
           if a[i,k] != 0.0:
               lam = a [i,k]/a[k,k]
               a[i,k+1:n] = a[i,k+1:n] - lam*a[k,k+1:n]
               b[i] = b[i] - lam*b[k]
  # Back substitution
    for k in range(n-1,-1,-1):
        b[k] = (b[k] - dot(a[k,k+1:n],b[k+1:n]))/a[k,k]
    return b
Example #43
def gaussElimin(a, b):
    n = len(b)
    # Elimination Phase
    for k in range(0, n - 1):
        for i in range(k + 1, n):
            if a[i, k] != 0.0:
                lam = a[i, k] / a[k, k]
                a[i, k + 1:n] = a[i, k + 1:n] - lam * a[k, k + 1:n]
                b[i] = b[i] - lam * b[k]
    # Back substitution
    for k in range(n - 1, -1, -1):
        b[k] = (b[k] - dot(a[k, k + 1:n], b[k + 1:n])) / a[k, k]
    return b
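A minimal usage sketch (assuming dot from numarray is in scope, as the back-substitution step requires); both a and b are overwritten:

from numarray import array, Float64

a = array([[2.0, 1.0],
           [1.0, 3.0]], type=Float64)
b = array([3.0, 5.0], type=Float64)
x = gaussElimin(a, b)    # expected approx [0.8, 1.4]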
Example #44
def inversePower(a, s, tol=1.0e-6):
    n = len(a)
    aStar = a - identity(n) * s  # Form [a*] = [a] - s[I]
    aStar = LUdecomp(aStar)  # Decompose [a*]
    x = zeros((n), type=Float64)
    for i in range(n):  # Seed [x] with random numbers
        x[i] = random()
    xMag = sqrt(dot(x, x))  # Normalize [x]
    x = x / xMag
    for i in range(50):  # Begin iterations
        xOld = x.copy()  # Save current [x]
        x = LUsolve(aStar, x)  # Solve [a*][x] = [xOld]
        xMag = sqrt(dot(x, x))  # Normalize [x]
        x = x / xMag
        if dot(xOld, x) < 0.0:  # Detect change in sign of [x]
            sign = -1.0
            x = -x
        else:
            sign = 1.0
        if sqrt(dot(xOld - x, xOld - x)) < tol:
            return s + sign / xMag, x
    print 'Inverse power method did not converge'
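A hypothetical call, assuming LUdecomp/LUsolve and the numarray and random imports used by inversePower are available. The eigenvalue closest to the shift s is returned together with its eigenvector:

from numarray import array, Float64

a = array([[5.0, 1.0],
           [1.0, 5.0]], type=Float64)
eigval, eigvec = inversePower(a, 4.2)   # eigenvalues are 4 and 6; expected eigval approx 4.0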
Example #45
def newtonRaphson2(f,x,tol=1.0e-9):
    
    def jacobian(f,x):
        h = 1.0e-4
        n = len(x)
        jac = zeros((n,n),type=Float64)
        f0 = f(x)
        for i in range(n):
            temp = x[i]
            x[i] = temp + h
            f1 = f(x)
            x[i] = temp
            jac[:,i] = (f1 - f0)/h
        return jac,f0
    
    for i in range(30):
        jac,f0 = jacobian(f,x)
        if sqrt(dot(f0,f0)/len(x)) < tol: return x
        dx = gaussPivot(jac,-f0)
        x = x + dx
        if sqrt(dot(dx,dx)) < tol*max(max(abs(x)),1.0): return x
    print 'Too many iterations'
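A hypothetical call on a small nonlinear system, assuming gaussPivot and the numarray names used inside newtonRaphson2 are available:

from numarray import array, Float64

def f(x):
    return array([x[0] + x[1] - 3.0,     # x + y = 3
                  x[0]*x[1] - 2.0])      # x*y = 2

xStart = array([0.5, 1.5], type=Float64)
root = newtonRaphson2(f, xStart)         # expected to approach [1.0, 2.0] or [2.0, 1.0]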
Example #46
def gaussSeidel(iterEqs, x, tol=1.0e-9):

    omega = 1.0
    k = 10
    p = 1
    for i in range(1, 501):
        xOld = x.copy()
        x = iterEqs(x, omega)
        dx = sqrt(dot(x - xOld, x - xOld))
        if dx < tol: return x, i, omega
        # Compute relaxation factor after k+p iterations
        if i == k: dx1 = dx
        if i == k + p:
            dx2 = dx
            omega = 2.0 / (1.0 + sqrt(1.0 - (dx2 / dx1)**(1.0 / p)))
    print 'Gauss-Seidel failed to converge'
Example #47
def calcprob(beta, x):
    """
 calculate probabilities (in percent) given beta and x
    """
    try:
        N, npreds = x.shape[1], x.shape[0]
    except: # single predictor, x is a vector, len(beta)=2.
        N, npreds = len(x), 1
    if len(beta) != npreds+1:
        raise ValueError,'sizes of beta and x do not match!'
    if npreds==1: # simple logistic regression
        return 100.*NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
    X = NA.ones((npreds+1,N), x.dtype.char)
    X[1:, :] = x
    ebx = NA.exp(NA.dot(beta, X))
    return 100.*ebx/(1.+ebx)
Example #48
def calcprob(beta, x):
    """
 calculate probabilities (in percent) given beta and x
    """
    try:
        N, npreds = x.shape[1], x.shape[0]
    except: # single predictor, x is a vector, len(beta)=2.
        N, npreds = len(x), 1
    if len(beta) != npreds+1:
        raise ValueError,'sizes of beta and x do not match!'
    if npreds==1: # simple logistic regression
        return 100.*NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
    X = NA.ones((npreds+1,N), x.typecode())
    X[1:, :] = x
    ebx = NA.exp(NA.dot(beta, X))
    return 100.*ebx/(1.+ebx)
Example #49
def gaussSeidel(iterEqs,x,tol = 1.0e-9):
    
    omega = 1.0
    k = 10
    p = 1
    for i in range(1,501):
        xOld = x.copy()
        x = iterEqs(x,omega)
        dx = sqrt(dot(x-xOld,x-xOld))
        if dx < tol: return x,i,omega
      # Compute relaxation factor after k+p iterations
        if i == k: dx1 = dx
        if i == k + p:
            dx2 = dx
            omega = 2.0/(1.0 + sqrt(1.0 - (dx2/dx1)**(1.0/p)))
    print 'Gauss-Seidel failed to converge'    
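A usage sketch showing the iterEqs callback the routine expects: one relaxation sweep of the system, here a diagonally dominant 2 x 2 set of equations (assuming zeros, Float64, sqrt and dot from numarray are in scope):

from numarray import zeros, Float64

def iterEqs(x, omega):
    # One over-relaxed Gauss-Seidel sweep for
    #   4*x0 -   x1 = 2
    #  -  x0 + 4*x1 = 6
    x[0] = x[0] + omega*((2.0 + x[1])/4.0 - x[0])
    x[1] = x[1] + omega*((6.0 + x[0])/4.0 - x[1])
    return x

x0 = zeros((2,), type=Float64)
x, numIter, omega = gaussSeidel(iterEqs, x0)
# expected x approx [0.9333, 1.7333]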
Example #50
def find(P, p):
   ''' In the 2D neighbourhood P, find the point closest to p.
      The approach is based on the selection of a trial value Pt, from P,
      and discarding all values further than Pt from p.
      To avoid repeated sqrt calculations the discard is based on an
      enclosing square.
      '''
   global lengthRemaining, trial
   lengthRemaining+= [[P.shape[0]]]
   Pz= P - p                             # zero based neighbourhood
   while len(Pz):
     Pt= Pz[0]                           # trial value
     Pta= N.abs(Pt)
     Pz= Pz[1:]
     pd= math.sqrt(N.dot(Pta, Pta))      # distance of p from the trial 
Example #51
def makeSPlot(categories, samples):
    """Construct the sPlot weights function.

    'categories' -- A sequence of categories.  Each is a pair '(pdf,
    num)', where 'pdf' is the normalized PDF of the *reduced* variables
    for the category, and 'num' is the number of samples in that
    category.

    'samples' -- A iterable of samples.

    returns -- A function that takes a sample as its argument and
    returns an array of sPlot weights for the given categories."""

    num_categories = len(categories)

    # Split the 'categories' argument into a sequence of PDFs and an
    # array of numbers.
    pdfs = [ f for (f, n) in categories ]
    nums = numarray.array([ n for (f, n) in categories ], type="Float64")

    # Accumulate the inverse of the covariance matrix.
    V_inv = numarray.zeros((num_categories, num_categories), "Float64")
    for sample in samples:
        # Evaluate PDFs for this sample.
        pds = numarray.array([ f(sample) for f in pdfs ])
        # Compute the contribution for this sample.
        denominator = numarray.dot(nums, pds) ** 2
        if denominator != 0:
            V_inv += outer_product(pds, pds) / denominator
    # Invert to obtain the covariance matrix.
    V = inverse(V_inv)

    def getWeights(sample):
        # Evaluate PDFs for this sample.
        pds = numarray.array([ f(sample) for f in pdfs ])
        # Compute an array of the weights.
        denominator = numarray.dot(nums, pds)
        if denominator != 0:
            return numarray.matrixmultiply(V, pds) / denominator
        else:
            return numarray.zeros((num_categories, ), "Float32")

    getWeights.covariance_matrix = V
    return getWeights
Example #52
def solve_mec_3constr(constraint_fnl, n = 30, initparams = None):
    delta = 1e-3
    if initparams:
        params = N.array(initparams)
    else:
        params = [3.14, 0, 0]
    for i in range(n):
        k, lam1, lam2 = params
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        c1c, c2c, c3c = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl =', c1c, c2c, c3c

        dc1s = []
        dc2s = []
        dc3s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1p, c2p, c3p = constraint_fnl(cost, x, y, th)
            params1 = N.array(params)
            params1[j] -= delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1m, c2m, c3m = constraint_fnl(cost, x, y, th)
            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))
            dc3s.append((c3p - c3m) / (2 * delta))

        # Make Jacobian matrix to invert
        jm = N.array([dc1s, dc2s, dc3s])
        #print jm
        ji = la.inverse(jm)

        dp = N.dot(ji, [c1c, c2c, c3c])
        if i < n/2: scale = .25
        else: scale = 1
        params -= scale * dp
        print '%', params
    return params
Example #53
def solve_mec_3constr(constraint_fnl, n=30, initparams=None):
    delta = 1e-3
    if initparams:
        params = N.array(initparams)
    else:
        params = [3.14, 0, 0]
    for i in range(n):
        k, lam1, lam2 = params
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        c1c, c2c, c3c = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl =', c1c, c2c, c3c

        dc1s = []
        dc2s = []
        dc3s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1p, c2p, c3p = constraint_fnl(cost, x, y, th)
            params1 = N.array(params)
            params1[j] -= delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1m, c2m, c3m = constraint_fnl(cost, x, y, th)
            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))
            dc3s.append((c3p - c3m) / (2 * delta))

        # Make Jacobian matrix to invert
        jm = N.array([dc1s, dc2s, dc3s])
        #print jm
        ji = la.inverse(jm)

        dp = N.dot(ji, [c1c, c2c, c3c])
        if i < n / 2: scale = .25
        else: scale = 1
        params -= scale * dp
        print '%', params
    return params
Example #54
def plotXYSVG(drawSpace, dataX, dataY, rank=0, dataLabel=[], plotColor = "black", axesColor="black", labelColor="black", symbolColor="red", XLabel=None, YLabel=None, title=None, fitcurve=None, connectdot=1, displayR=None, loadingPlot = 0, offset= (80, 20, 40, 60), zoom = 1, specialCases=[], showLabel = 1):
    'displayR : correlation scatter plot, loadings : loading plot'

    dataXRanked, dataYRanked = webqtlUtil.calRank(dataX, dataY, len(dataX))

    # Switching Ranked and Unranked X and Y values if a Spearman Rank Correlation
    if rank == 0:
        dataXPrimary = dataX
        dataYPrimary = dataY
        dataXAlt = dataXRanked
        dataYAlt = dataYRanked

    else:
        dataXPrimary = dataXRanked
        dataYPrimary = dataYRanked
        dataXAlt = dataX
        dataYAlt = dataY



    xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset
    plotWidth = drawSpace.attributes['width'] - xLeftOffset - xRightOffset
    plotHeight = drawSpace.attributes['height'] - yTopOffset - yBottomOffset
    if plotHeight<=0 or plotWidth<=0:
        return
    if len(dataXPrimary) < 1 or  len(dataXPrimary) != len(dataYPrimary) or (dataLabel and len(dataXPrimary) != len(dataLabel)):
        return

    max_X=max(dataXPrimary)
    min_X=min(dataXPrimary)
    max_Y=max(dataYPrimary)
    min_Y=min(dataYPrimary)

    #for some reason I forgot why I need to do this
    if loadingPlot:
        min_X = min(-0.1,min_X)
        max_X = max(0.1,max_X)
        min_Y = min(-0.1,min_Y)
        max_Y = max(0.1,max_Y)

    xLow, xTop, stepX=detScale(min_X,max_X)
    yLow, yTop, stepY=detScale(min_Y,max_Y)
    xScale = plotWidth/(xTop-xLow)
    yScale = plotHeight/(yTop-yLow)

    #draw drawing region
    r = svg.rect(xLeftOffset, yTopOffset, plotWidth,  plotHeight, 'none', axesColor, 1)
    drawSpace.addElement(r)

    #calculate data points
    data = map(lambda X, Y: (X, Y), dataXPrimary, dataYPrimary)
    xCoord = map(lambda X, Y: ((X-xLow)*xScale + xLeftOffset, yTopOffset+plotHeight-(Y-yLow)*yScale), dataXPrimary, dataYPrimary)
    labelFontF = "verdana"
    labelFontS = 11

    if loadingPlot:
        xZero = -xLow*xScale+xLeftOffset
        yZero = yTopOffset+plotHeight+yLow*yScale
        for point in xCoord:
            drawSpace.addElement(svg.line(xZero,yZero,point[0],point[1], "red", 1))
    else:
        if connectdot:
            pass
            #drawSpace.drawPolygon(xCoord,edgeColor=plotColor,closed=0)
        else:
            pass

    for i, item in enumerate(xCoord):
        if dataLabel and dataLabel[i] in specialCases:
            drawSpace.addElement(svg.rect(item[0]-3, item[1]-3, 6, 6, "none", "green", 0.5))
            #drawSpace.drawCross(item[0],item[1],color=pid.blue,size=5)
        else:
            drawSpace.addElement(svg.line(item[0],item[1]+5,item[0],item[1]-5,symbolColor,1))
            drawSpace.addElement(svg.line(item[0]+5,item[1],item[0]-5,item[1],symbolColor,1))
        if showLabel and dataLabel:
            pass
            drawSpace.addElement(svg.text(item[0], item[1]+14, dataLabel[i], labelFontS,
                    labelFontF, text_anchor="middle", style="stroke:blue;stroke-width:0.5;"))
            #canvas.drawString(, item[0]- canvas.stringWidth(dataLabel[i],
            #       font=labelFont)/2, item[1]+14, font=labelFont, color=pid.blue)

    #draw scale
    #scaleFont=pid.Font(ttf="cour",size=14,bold=1)
    x=xLow
    for i in range(stepX+1):
        xc=xLeftOffset+(x-xLow)*xScale
        drawSpace.addElement(svg.line(xc,yTopOffset+plotHeight,xc,yTopOffset+plotHeight+5, axesColor, 1))
        strX = cformat(d=x, rank=rank)
        drawSpace.addElement(svg.text(xc,yTopOffset+plotHeight+20,strX,13, "courier", text_anchor="middle"))
        x+= (xTop - xLow)/stepX

    y=yLow
    for i in range(stepY+1):
        yc=yTopOffset+plotHeight-(y-yLow)*yScale
        drawSpace.addElement(svg.line(xLeftOffset,yc,xLeftOffset-5,yc, axesColor, 1))
        strY = cformat(d=y, rank=rank)
        drawSpace.addElement(svg.text(xLeftOffset-10,yc+5,strY,13, "courier", text_anchor="end"))
        y+= (yTop - yLow)/stepY

    #draw label
    labelFontF = "verdana"
    labelFontS = 17
    if XLabel:
        drawSpace.addElement(svg.text(xLeftOffset+plotWidth/2.0,
                yTopOffset+plotHeight+yBottomOffset-10,XLabel,
                labelFontS, labelFontF, text_anchor="middle"))

    if YLabel:
        drawSpace.addElement(svg.text(xLeftOffset-50,
                 yTopOffset+plotHeight/2,YLabel,
                labelFontS, labelFontF, text_anchor="middle", style="writing-mode:tb-rl", transform="rotate(270 %d %d)" % (xLeftOffset-50,  yTopOffset+plotHeight/2)))
        #drawSpace.drawString(YLabel, xLeftOffset-50, yTopOffset+plotHeight- (plotHeight-drawSpace.stringWidth(YLabel,font=labelFont))/2.0,
        #       font=labelFont,color=labelColor,angle=90)


    if fitcurve:
        sys.argv = [ "mod_python" ]
        #from numarray import linear_algebra as la
        #from numarray import ones, array, dot, swapaxes
        fitYY = array(dataYPrimary)
        fitXX = array([ones(len(dataXPrimary)),dataXPrimary])
        AA = dot(fitXX,swapaxes(fitXX,0,1))
        BB = dot(fitXX,fitYY)
        bb = la.linear_least_squares(AA,BB)[0]

        xc1 = xLeftOffset
        yc1 = yTopOffset+plotHeight-(bb[0]+bb[1]*xLow-yLow)*yScale
        if yc1 > yTopOffset+plotHeight:
            yc1 = yTopOffset+plotHeight
            xc1 = (yLow-bb[0])/bb[1]
            xc1=(xc1-xLow)*xScale+xLeftOffset
        elif yc1 < yTopOffset:
            yc1 = yTopOffset
            xc1 = (yTop-bb[0])/bb[1]
            xc1=(xc1-xLow)*xScale+xLeftOffset
        else:
            pass

        xc2 = xLeftOffset + plotWidth
        yc2 = yTopOffset+plotHeight-(bb[0]+bb[1]*xTop-yLow)*yScale
        if yc2 > yTopOffset+plotHeight:
            yc2 = yTopOffset+plotHeight
            xc2 = (yLow-bb[0])/bb[1]
            xc2=(xc2-xLow)*xScale+xLeftOffset
        elif yc2 < yTopOffset:
            yc2 = yTopOffset
            xc2 = (yTop-bb[0])/bb[1]
            xc2=(xc2-xLow)*xScale+xLeftOffset
        else:
            pass

        drawSpace.addElement(svg.line(xc1,yc1,xc2,yc2,"green", 1))

    if displayR:
        labelFontF = "trebuc"
        labelFontS = 14
        NNN = len(dataX)

        corr = webqtlUtil.calCorrelation(dataXPrimary,dataYPrimary,NNN)[0]

        if NNN < 3:
            corrPValue = 1.0
        else:
            if abs(corr) >= 1.0:
                corrPValue = 0.0
            else:
                ZValue = 0.5*log((1.0+corr)/(1.0-corr))
                ZValue = ZValue*sqrt(NNN-3)
                corrPValue = 2.0*(1.0 - reaper.normp(abs(ZValue)))

        NStr = "N of Cases=%d" % NNN

        if rank == 1:
            corrStr = "Spearman's r=%1.3f P=%3.2E" % (corr, corrPValue)
        else:
            corrStr = "Pearson's r=%1.3f P=%3.2E" % (corr, corrPValue)

        drawSpace.addElement(svg.text(xLeftOffset,yTopOffset-10,NStr,
                labelFontS, labelFontF, text_anchor="start"))
        drawSpace.addElement(svg.text(xLeftOffset+plotWidth,yTopOffset-25,corrStr,
                labelFontS, labelFontF, text_anchor="end"))
    """
    """
    return
Example #55
 def getAngle(self, other):
     distance = dot(self.array, other.array)
     cosine = distance / (self.getNorm() * other.getNorm())
     angle = arccos(cosine)
     return angle
Example #56
 def getNorm(self):
     return sqrt(dot(self.array, self.array))
Example #57
def solve_mec(constraint_fnl):
    delta = 1e-3
    params = [pi, 0, 0]
    for i in range(20):
        k, lam1, lam2 = params
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        #print i * .05, 'setgray'
        #plot(xys)
        c1c, c2c, costc = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl =', c1c, c2c, 'cost =', costc

        dc1s = []
        dc2s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1p, c2p, costp = constraint_fnl(cost, x, y, th)
            params1 = N.array(params)
            params1[j] -= delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1m, c2m, costm = constraint_fnl(cost, x, y, th)
            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))
        xp = cross_prod(dc1s, dc2s)
        xp = N.divide(xp, sqrt(N.dot(xp, xp)))  # Normalize to unit length

        print '% dc1s =', dc1s
        print '% dc2s =', dc2s
        print '% xp =', xp

        # Compute second derivative wrt orthogonal vec
        params1 = N.array(params)
        for j in range(len(params)):
            params1[j] += delta * xp[j]
        k, lam1, lam2 = params1
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        c1p, c2p, costp = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl+ =', c1p, c2p, 'cost =', costp
        params1 = N.array(params)
        for j in range(len(params)):
            params1[j] -= delta * xp[j]
        k, lam1, lam2 = params1
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        c1m, c2m, costm = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl- =', c1m, c2m, 'cost =', costm
        d2cost = (costp + costm - 2 * costc) / (delta * delta)
        dcost = (costp - costm) / (2 * delta)

        print '% dcost =', dcost, 'd2cost =', d2cost
        if d2cost < 0: d2cost = .1
        # Make Jacobian matrix to invert
        jm = N.array([dc1s, dc2s, [x * d2cost for x in xp]])
        #print jm
        ji = la.inverse(jm)
        #print ji

        dp = N.dot(ji, [c1c, c2c, dcost])
        print '% dp =', dp
        print '% [right]=', [c1c, c2c, dcost]
        params -= dp * .1
        print '%', params
        sys.stdout.flush()
    return params
Example #58
 def __imul__(self,other):
     aother = asarray(other)
     self[:] = dot(self, aother)
     return self