def inverse(a, d0=None):
    """Invert a matrix, using hand-coded cofactor formulas for the common
    2x2 and 3x3 cases and falling back to linear_algebra.inverse otherwise.
    d0, if given, is a precomputed determinant of a."""
    if shape(a) == (2, 2):
        _a = array(shape=shape(a), type=Float64)
        ai = array(shape=shape(a), type=Float64)
        _a[:] = a[:]
        if d0 is None: d0 = determinant(_a)
        ai[0, 0] = _a[1, 1]
        ai[0, 1] = -_a[0, 1]
        ai[1, 0] = -_a[1, 0]
        ai[1, 1] = _a[0, 0]
        return ai / d0

    elif shape(a) == (3, 3):
        _a = array(shape=shape(a), type=Float64)
        ai = array(shape=shape(a), type=Float64)
        _a[:] = a[:]
        if d0 is None: d0 = determinant(_a)

        ai[0, 0] = _a[1, 1] * _a[2, 2] - _a[2, 1] * _a[1, 2]
        ai[0, 1] = _a[2, 1] * _a[0, 2] - _a[0, 1] * _a[2, 2]
        ai[0, 2] = _a[0, 1] * _a[1, 2] - _a[1, 1] * _a[0, 2]

        ai[1, 0] = _a[2, 0] * _a[1, 2] - _a[1, 0] * _a[2, 2]
        ai[1, 1] = _a[0, 0] * _a[2, 2] - _a[2, 0] * _a[0, 2]
        ai[1, 2] = _a[1, 0] * _a[0, 2] - _a[0, 0] * _a[1, 2]

        ai[2, 0] = _a[1, 0] * _a[2, 1] - _a[2, 0] * _a[1, 1]
        ai[2, 1] = _a[2, 0] * _a[0, 1] - _a[0, 0] * _a[2, 1]
        ai[2, 2] = _a[0, 0] * _a[1, 1] - _a[1, 0] * _a[0, 1]

        return ai / d0

    return linear_algebra.inverse(a)
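A minimal usage sketch for the 2x2 branch (the import layout is an assumption; the snippet clearly expects shape, array, Float64, determinant and a linear_algebra module in scope):

from numarray import array, shape, Float64
from numarray.linear_algebra import determinant
import numarray.linear_algebra as linear_algebra

m = array([[4., 7.], [2., 6.]], type=Float64)
print inverse(m)    # [[ 0.6 -0.7] [-0.2  0.4]], since det(m) = 10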
Example #2
class _Matrix(NumArray):
    def _rc(self, a):
        if len(shape(a)) == 0:
            return a
        else:
            return Matrix(a)

    def __mul__(self, other):
        aother = asarray(other)
        if len(aother.shape) == 0:
            # scalar operand: scale elementwise; writing self * aother here
            # would recurse straight back into __mul__
            return self._rc(multiply(self, aother))
        else:
            return self._rc(_dot(self, aother))

    def __rmul__(self, other):
        aother = asarray(other)
        if len(aother.shape) == 0:
            # aother * self would re-enter __rmul__ (Matrix subclasses
            # NumArray), so scale elementwise here too
            return self._rc(multiply(aother, self))
        else:
            return self._rc(_dot(aother, self))

    def __imul__(self, other):
        aother = asarray(other)
        self[:] = _dot(self, aother)
        return self

    def __pow__(self, other):
        shape = self.shape
        if len(shape) != 2 or shape[0] != shape[1]:
            raise TypeError, "matrix is not square"
        if type(other) not in (type(1), type(1L)):
            raise TypeError, "exponent must be an integer"
        if other == 0:
            return Matrix(identity(shape[0]))
        if other < 0:
            result = Matrix(LinearAlgebra.inverse(self))
            x = Matrix(result)
            other = -other
        else:
            result = self
            x = result
        if other <= 3:
            while (other > 1):
                result = result * x
                other = other - 1
            return result
        # binary decomposition to reduce the number of Matrix
        #  Multiplies for other > 3.
        beta = _binary(other)
        t = len(beta)
        Z, q = x.copy(), 0
        while beta[t - q - 1] == '0':
            Z *= Z
            q += 1
        result = Z.copy()
        for k in range(q + 1, t):
            Z *= Z
            if beta[t - k - 1] == '1':
                result *= Z
        return result
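The `other > 3` branch of __pow__ is exponentiation by squaring; the same idea as a standalone sketch (plain Python, no Matrix machinery):

def pow_by_squaring(x, n, mul):
    # n > 0; walk the bits of n from least significant upward,
    # squaring x each step, so only O(log n) multiplies are needed
    result = None
    while n:
        if n & 1:
            result = x if result is None else mul(result, x)
        x = mul(x, x)
        n >>= 1
    return result

print pow_by_squaring(3, 13, lambda a, b: a * b)   # 1594323 == 3 ** 13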
def _simple_logistic_regression(x,y,beta_start=None,verbose=False,
                               CONV_THRESH=1.e-3,MAXIT=500):
    """
 Faster than logistic_regression when there is only one predictor.
    """
    if len(x) != len(y):
        raise ValueError, "x and y should be the same length!"
    if beta_start is None:
        beta_start = NA.zeros(2,x.typecode())
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta 
        p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likelihood
        s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)])  # scoring function
        # information matrix
        J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],
                          [NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])
        beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH: break
        iter = iter + 1
    return beta, J_bar, l
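A minimal usage sketch on synthetic data (numarray's random_array API is assumed here):

import numarray as NA
import numarray.random_array as RA

x = RA.normal(0., 1., 200)
y = NA.where(x + RA.normal(0., 1., 200) > 0., 1., 0.)
beta, J_bar, l = _simple_logistic_regression(x, y, verbose=True)
# beta[1] should come out positive: larger x raises the odds that y == 1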
Example #7
    def __init__(self, p, webmatrix):
        assert 0 <= p <= 1
        self.p = float(p)
        if isinstance(webmatrix, (list, tuple)):
            webmatrix = array(webmatrix, Float)
        assert webmatrix.shape[0] == webmatrix.shape[1]
        self.webmatrix = webmatrix

        # create the deltamatrix
        imatrix = identity(webmatrix.shape[0], Float)
        for i in range(webmatrix.shape[0]):
            imatrix[i] = imatrix[i]*sum(webmatrix[i,:])
        deltamatrix = la.inverse(imatrix)
        self.deltamatrix = deltamatrix

        # create the fmatrix
        self.fmatrix = ones(webmatrix.shape, Float)

        self.sigma = webmatrix.shape[0]

        # calculate the Stochastic matrix
        _f_normalized = (self.sigma**-1)*self.fmatrix
        _randmatrix = (1-p)*_f_normalized

        _linkedmatrix = p * matrixmultiply(deltamatrix, webmatrix)
        M = _randmatrix + _linkedmatrix
        
        self.stochasticmatrix = M

        self.invariantmeasure = ones((1, webmatrix.shape[0]), Float)
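The deltamatrix is simply the inverse of diag(row sums), so matrixmultiply(deltamatrix, webmatrix) row-normalizes the link matrix. A standalone sketch of that step (import names assumed to match the star-imported numarray style above):

from numarray import array, identity, sum, matrixmultiply, Float
import numarray.linear_algebra as la

W = array([[0., 1., 1.],
           [1., 0., 0.],
           [1., 1., 0.]], Float)
D = identity(3, Float)
for i in range(3):
    D[i] = D[i] * sum(W[i, :])          # diag(row sums of W)
Wn = matrixmultiply(la.inverse(D), W)   # each row of Wn now sums to 1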
Example #8
def findmec(th0, th1):
    delta = 1e-3
    if th0 < 0 and th0 - th1 < .1:
        sm = 5.
        sp = 6.
    else:
        sm = 3.
        sp = 5.
    params = [sm, sp]
    lasterr = 1e6
    for i in range(25):
        sm, sp = params
        ath0, ath1 = trymec(sm, sp)
        c1c, c2c = th0 - ath0, th1 - ath1

        err = c1c * c1c + c2c * c2c
        if 0:
            print '%findmec', sm, sp, ath0, ath1, err
            sys.stdout.flush()

        if err < 1e-9:
            return params
        if err > lasterr:
            return None
        lasterr = err


        dc1s = []
        dc2s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            sm, sp = params1
            ath0, ath1 = trymec(sm, sp)
            c1p, c2p = th0 - ath0, th1 - ath1

            params1 = N.array(params)
            params1[j] -= delta
            sm, sp = params1
            ath0, ath1 = trymec(sm, sp)
            c1m, c2m = th0 - ath0, th1 - ath1

            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))

        jm = N.array([dc1s, dc2s])
        ji = la.inverse(jm)
        dp = N.dot(ji, [c1c, c2c])

        if i < 4:
            scale = .5
        else:
            scale = 1
        params -= scale * dp
        if params[0] < 0: params[0] = 0.
    return params
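A sketch isolating the technique findmec uses above: a damped Newton step on a residual vector, with the Jacobian built column-by-column from central differences (the function and its names are hypothetical, not part of the original module):

import numarray as N
import numarray.linear_algebra as la

def newton_step(params, residual_fn, delta=1e-3, scale=1.):
    """One damped Newton step: params - scale * J^-1 * r(params).
    params is a numarray array; residual_fn maps it to a residual vector."""
    r = N.array(residual_fn(params))
    J = N.zeros((len(r), len(params)), N.Float64)
    for j in range(len(params)):
        dp = N.zeros(len(params), N.Float64)
        dp[j] = delta
        # central difference: column j is d(residual)/d(params[j])
        J[:, j] = (N.array(residual_fn(params + dp)) -
                   N.array(residual_fn(params - dp))) / (2 * delta)
    return params - scale * N.dot(la.inverse(J), r)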
Example #10
    def toWorld(self,v,trl=False):
        """Transform a vertex from camera to world coordinates.

        The specified vector can have 3 or 4 (homogeneous) components.
        This uses the currently saved rotation matrix.
        """
        a = la.inverse(array(self.rot))
        if len(v) == 3:
            v = v + [ 1. ]
        v = matrixmultiply(array(v),a)
        return v[0:3] / v[3]
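A self-contained check of the same transform in the row-vector convention used above (the 4x4 matrix here is a stand-in for self.rot):

from numarray import array, matrixmultiply
import numarray.linear_algebra as la

rot = array([[ 0., 1., 0., 0.],    # 90-degree rotation about z, homogeneous
             [-1., 0., 0., 0.],
             [ 0., 0., 1., 0.],
             [ 0., 0., 0., 1.]])
v = array([1., 0., 0., 1.])        # camera-space x axis, homogeneous
w = matrixmultiply(v, la.inverse(rot))
print w[0:3] / w[3]                # its world-space direction: [ 0. -1.  0.]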
Example #11
 def setNewX(self):
     """Calculate new X values"""

     Jacobian = linear_algebra.inverse(self.Jacobian)

     self.DeltaX = matrixmultiply(Jacobian, self.OldY)

     self.NewX = self.OldX - self.DeltaX

     # clamp any harmonic that ran past the allowed range
     for h in range(self.HarmonicSize):
         if self.NewX[h] > self.MAX_X:
             self.NewX[h] = complex(1.0)
Example #12
def regress(X):
    """
    Apply a linear regression on X.

    X is a numarray array of the form [Y|X].
    We split this up, do the regression and return the
    regression coefficients, beta_i.
    """
    X = N.array(X)
    Y = X[:,0].copy()
    X[:,0] = 1.0
    a = N.dot(NL.inverse(N.dot(N.transpose(X), X)), N.transpose(X))
    return N.dot(a, Y)
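A minimal usage sketch, run in the same module context: rows are [y, x] with y = 1 + 2x, so the recovered coefficients should be the intercept and slope:

import numarray as N

data = N.array([[1., 0.],
                [3., 1.],
                [5., 2.],
                [7., 3.]])
print regress(data)    # approximately [ 1.  2.]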
Example #13
def makeSPlot(categories, samples):
    """Construct the sPlot weights function.

    'categories' -- A sequence of categories.  Each is a pair '(pdf,
    num)', where 'pdf' is the normalized PDF of the *reduced* variables
    for the category, and 'num' is the number of samples in that
    category.

    'samples' -- An iterable of samples.

    returns -- A function that takes a sample as its argument and
    returns an array of sPlot weights for the given categories."""

    num_categories = len(categories)

    # Split the 'categories' argument into a sequence of PDFs and an
    # array of numbers.
    pdfs = [ f for (f, n) in categories ]
    nums = numarray.array([ n for (f, n) in categories ], type="Float64")

    # Accumulate the inverse of the covariance matrix.
    V_inv = numarray.zeros((num_categories, num_categories), "Float64")
    for sample in samples:
        # Evaluate PDFs for this sample.
        pds = numarray.array([ f(sample) for f in pdfs ])
        # Compute the contribution for this sample.
        denominator = numarray.dot(nums, pds) ** 2
        if denominator != 0:
            V_inv += outer_product(pds, pds) / denominator
    # Invert to obtain the covariance matrix.
    V = inverse(V_inv)

    def getWeights(sample):
        # Evaluate PDFs for this sample.
        pds = numarray.array([ f(sample) for f in pdfs ])
        # Compute an array of the weights.
        denominator = numarray.dot(nums, pds)
        if denominator != 0:
            return numarray.matrixmultiply(V, pds) / denominator
        else:
            return numarray.zeros((num_categories, ), "Float32")

    getWeights.covariance_matrix = V
    return getWeights
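A hypothetical usage sketch with two overlapping 1-D Gaussian categories (outer_product and the numarray import are assumed from the enclosing module):

from math import exp, pi, sqrt

def gauss(mu, sigma):
    return lambda v: exp(-0.5 * ((v - mu) / sigma) ** 2) / (sigma * sqrt(2. * pi))

samples = [-0.4, 0.3, 1.1, 2.2, 2.9, 3.5]
weights = makeSPlot([(gauss(0., 1.), 100), (gauss(3., 1.), 50)], samples)
print weights(1.2)     # sPlot weights of the two categories for v = 1.2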
Example #14
def solve_mec_3constr(constraint_fnl, n = 30, initparams = None):
    delta = 1e-3
    if initparams:
        params = N.array(initparams)
    else:
        params = [3.14, 0, 0]
    for i in range(n):
        k, lam1, lam2 = params
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        c1c, c2c, c3c = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl =', c1c, c2c, c3c

        dc1s = []
        dc2s = []
        dc3s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1p, c2p, c3p = constraint_fnl(cost, x, y, th)
            params1 = N.array(params)
            params1[j] -= delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1m, c2m, c3m = constraint_fnl(cost, x, y, th)
            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))
            dc3s.append((c3p - c3m) / (2 * delta))

        # Make Jacobian matrix to invert
        jm = N.array([dc1s, dc2s, dc3s])
        #print jm
        ji = la.inverse(jm)

        dp = N.dot(ji, [c1c, c2c, c3c])
        if i < n/2: scale = .25
        else: scale = 1
        params -= scale * dp
        print '%', params
    return params
Example #16
# boundary conditions
T[0][0] = 300.0  # fixed temperature
T[M][0] = T[M - 1][0]  # adiabatic (insulated boundary)

# build the coefficient matrix
A = numarray.array([[0. for i in range(M + 1)] for j in range(M + 1)])
for i in range(1, M):
    A[i][i - 1] = -a
    A[i][i] = 1 + 2 * a
    A[i][i + 1] = -a
# boundary conditions
A[0][0] = 1.  # constant temperature
A[M][M] = 1.  # adiabatic

# inverse of A
A_inv = la.inverse(A)

f = open('output', 'w')

for j in range(1, N):
    preT = T

    T = numarray.dot(A_inv, preT)
    T[0][0] = 300.0
    T[M][0] = T[M - 1][0]

    # write the results to the output file
    f.write('# t=%ss\n' % (j * dt))
    for i in range(M + 1):
        f.write('%s, %s\n' % (i * dx, T[i][0]))
    f.write('\n\n')
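For reference, A above is the backward-Euler (fully implicit) discretization of the 1-D heat equation: with a = alpha*dt/dx**2, each interior row encodes -a*T[i-1] + (1 + 2a)*T[i] - a*T[i+1] = preT[i], so T = dot(A_inv, preT) advances one time step. A minimal self-contained version of the matrix build (the values of M and a here are assumptions):

import numarray
import numarray.linear_algebra as la

M, a = 10, 0.25                      # assumed grid size and diffusion number
A = numarray.zeros((M + 1, M + 1), numarray.Float64)
for i in range(1, M):
    A[i, i - 1], A[i, i], A[i, i + 1] = -a, 1 + 2 * a, -a
A[0, 0] = A[M, M] = 1.               # fixed-temperature and adiabatic rows
A_inv = la.inverse(A)                # reused every step, since A is constant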
def logistic_regression(x,y,beta_start=None,verbose=False,CONV_THRESH=1.e-3,
                        MAXIT=500):
    """
 Uses the Newton-Raphson algorithm to calculate a maximum-likelihood
 estimate of a logistic regression.
 The algorithm is known as 'iteratively re-weighted least squares', or IRLS.

 x - rank-1 or rank-2 array of predictors. If x is rank-2,
     the number of predictors = x.shape[0] = N.  If x is rank-1,
     it is assumed N=1.
     
 y - binary outcomes (if N>1 len(y) = x.shape[1], if N=1 len(y) = len(x))
 
 beta_start - initial beta vector (default zeros(N+1,x.dtype.char))
 
 if verbose=True, diagnostics printed for each iteration (default False).
 
 MAXIT - max number of iterations (default 500)
 
 CONV_THRESH - convergence threshold (sum of absolute differences
  of beta-beta_old, default 0.001)

 returns beta (the logistic regression coefficients, an N+1 element vector),
 J_bar (the (N+1)x(N+1) information matrix), and l (the log-likelihood).
 
 J_bar can be used to estimate the covariance matrix and the standard
 error for beta.
 
 l can be used for a chi-squared significance test.

 covmat = inverse(J_bar)     --> covariance matrix of coefficients (beta)
 stderr = sqrt(diag(covmat)) --> standard errors for beta
 deviance = -2l              --> scaled deviance statistic
 chi-squared value for -2l is the model chi-squared test.
    """
    if x.shape[-1] != len(y):
        raise ValueError, "x.shape[-1] and y should be the same length!"
    try:
        N, npreds = x.shape[1], x.shape[0]
    except: # single predictor, use simple logistic regression routine.
        return _simple_logistic_regression(x,y,beta_start=beta_start,
               CONV_THRESH=CONV_THRESH,MAXIT=MAXIT,verbose=verbose)
    if beta_start is None:
        beta_start = NA.zeros(npreds+1,x.dtype.char)
    X = NA.ones((npreds+1,N), x.dtype.char)
    X[1:, :] = x
    Xt = NA.transpose(X)
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta 
        ebx = NA.exp(NA.dot(beta, X))
        p = ebx/(1.+ebx)
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likelihood
        s = NA.dot(X, y-p)                            # scoring function
        J_bar = NA.dot(X*p,Xt)                        # information matrix
        beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH: break
        iter = iter + 1
    if iter == MAXIT and diff > CONV_THRESH: 
        print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH,MAXIT)
    return beta, J_bar, l
Example #18
def solve_mec(constraint_fnl):
    delta = 1e-3
    params = [pi, 0, 0]
    for i in range(20):
        k, lam1, lam2 = params
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        #print i * .05, 'setgray'
        #plot(xys)
        c1c, c2c, costc = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl =', c1c, c2c, 'cost =', costc

        dc1s = []
        dc2s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1p, c2p, costp = constraint_fnl(cost, x, y, th)
            params1 = N.array(params)
            params1[j] -= delta
            k, lam1, lam2 = params1
            xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
            c1m, c2m, costm = constraint_fnl(cost, x, y, th)
            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))
        xp = cross_prod(dc1s, dc2s)
        xp = N.divide(xp, sqrt(N.dot(xp, xp))) # Normalize to unit length

        print '% dc1s =', dc1s
        print '% dc2s =', dc2s
        print '% xp =', xp
        
        # Compute second derivative wrt orthogonal vec
        params1 = N.array(params)
        for j in range(len(params)):
            params1[j] += delta * xp[j]
        k, lam1, lam2 = params1
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        c1p, c2p, costp = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl+ =', c1p, c2p, 'cost =', costp
        params1 = N.array(params)
        for j in range(len(params)):
            params1[j] -= delta * xp[j]
        k, lam1, lam2 = params1
        xys, cost, x, y, th = run_elastica(-.5, .5, k, lam1, lam2)
        c1m, c2m, costm = constraint_fnl(cost, x, y, th)
        print '% constraint_fnl- =', c1m, c2m, 'cost =', costm
        d2cost = (costp + costm - 2 * costc) / (delta * delta)
        dcost = (costp - costm) / (2 * delta)

        print '% dcost =', dcost, 'd2cost =', d2cost
        if d2cost < 0: d2cost = .1
        # Make Jacobian matrix to invert
        jm = N.array([dc1s, dc2s, [xi * d2cost for xi in xp]])
        #print jm
        ji = la.inverse(jm)
        #print ji

        dp = N.dot(ji, [c1c, c2c, dcost])
        print '% dp =', dp
        print '% [right]=', [c1c, c2c, dcost]
        params -= dp * .1
        print '%', params
        sys.stdout.flush()
    return params
 # random draws from trivariate normal distribution
 x = multivariate_normal(NA.array([0,0,0]),NA.array([[1,r12,r13],[r12,1,r23],[r13,r23,1]]), nsamps)
 x2 = multivariate_normal(NA.array([0,0,0]),NA.array([[1,r12,r13],[r12,1,r23],[r13,r23,1]]), nsamps)
 print 'correlations (r12,r13,r23) = ',r12,r13,r23
 print 'number of realizations = ',nsamps
 # training data.
 obs = x[:,0]
 climprob = NA.sum((obs > 0).astype('f'))/nsamps
 fcst = NA.transpose(x[:,1:]) # 2 predictors.
 obs_binary = obs > 0.
 # independent data for verification.
 obs2 = x2[:,0]
 fcst2 = NA.transpose(x2[:,1:])
 # compute logistic regression.
 beta,Jbar,llik = logistic_regression(fcst,obs_binary,verbose=True)
 covmat = LA.inverse(Jbar)
 stderr = NA.sqrt(mlab.diag(covmat))
 print 'beta =' ,beta
 print 'standard error =',stderr
 # forecasts from independent data.
 prob = calcprob(beta, fcst2)
 # compute Brier Skill Score
 verif = (obs2 > 0.).astype('f')
 bs = mlab.mean((0.01*prob - verif)**2)
 bsclim = mlab.mean((climprob - verif)**2)
 bss = 1.-(bs/bsclim)
 print 'Brier Skill Score (should be within +/- 0.1 of 0.18) = ',bss
 # calculate reliability. 
 # see http://www.bom.gov.au/bmrc/wefor/staff/eee/verif/verif_web_page.html
 # for information on the Brier Skill Score and reliability diagrams.
 totfreq = NA.zeros(10,'f')