def calc_CCuij(U, V): """Calculate the cooralation coefficent for anisotropic ADP tensors U and V. """ invU = linalg.inverse(U) invV = linalg.inverse(V) det_invU = linalg.determinant(invU) det_invV = linalg.determinant(invV) return (math.sqrt(math.sqrt(det_invU * det_invV)) / math.sqrt( (1.0 / 8.0) * linalg.determinant(invU + invV)))
def calc_CCuij(U, V): """Calculate the correlation coefficient for anisotropic ADP tensors U and V. """ ## FIXME: Check for non-positive Uij's, 2009-08-19 invU = linalg.inverse(U) invV = linalg.inverse(V) #invU = internal_inv3x3(U) #invV = internal_inv3x3(V) det_invU = linalg.determinant(invU) det_invV = linalg.determinant(invV) return ( math.sqrt(math.sqrt(det_invU * det_invV)) / math.sqrt((1.0/8.0) * linalg.determinant(invU + invV)) )
def calc_CCuij(U, V): """Calculate the correlation coefficient for anisotropic ADP tensors U and V. """ ## FIXME: Check for non-positive Uij's, 2009-08-19 invU = linalg.inverse(U) invV = linalg.inverse(V) #invU = internal_inv3x3(U) #invV = internal_inv3x3(V) det_invU = linalg.determinant(invU) det_invV = linalg.determinant(invV) return (math.sqrt(math.sqrt(det_invU * det_invV)) / math.sqrt( (1.0 / 8.0) * linalg.determinant(invU + invV)))
def KF(y, XF0, VF0, F, H, G, Q, R, limy, ISW, OSW, m, N):
    if OSW == 1:
        XPS = np.zeros((N,m),dtype=np.float); XFS = np.zeros((N,m),dtype=np.float)
        VPS = np.zeros((N,m,m),dtype=np.float); VFS = np.zeros((N,m,m),dtype=np.float)
    XF = XF0; VF = VF0; NSUM = 0.0; SIG2 = 0.0; LDET = 0.0
    for n in xrange(N):
        # one-step-ahead prediction
        XP = np.ndarray.flatten( np.dot(F, XF.T) ) # XF becomes a column vector from the 2nd pass, so always flatten back to a row vector
        VP = np.dot( np.dot(F, VF), F.T ) + np.dot( np.dot(G, Q), G.T)
        # filtering
        # R stays a column vector unless manipulated; beware that Python treats it as a row vector!
        if y[n] < limy:
            NSUM = NSUM + 1
            B = np.dot( np.dot(H, VP), H.T) + R  # H is mathematically a row vector
            B1 = inverse(B)                      # nvar-dimensional column vector
            K = np.matrix(np.dot(VP, H.T)) * np.matrix(B1)  # K becomes a column vector (matrix)
            e = np.array(y[n]).T - np.dot(H, XP.T)          # nvar-dimensional column vector
            XF = np.array(XP) + np.array( K * np.matrix(e) ).T  # row vector
            VF = np.array(VP) - np.array( K * np.matrix(H) * VP)
            SIG2 = SIG2 + np.ndarray.flatten(np.array( np.matrix(e) * np.matrix(B1) * np.matrix(e).T ))[0]  # use matrix so the 1-D case also works
            LDET = LDET + math.log(linalg.det(B))
        else:
            XF = XP; VF = VP
        if OSW == 1:
            XPS[n,:] = XP; XFS[n,:] = XF; VPS[n,:,:] = VP; VFS[n,:,:] = VF
    SIG2 = SIG2 / NSUM
    if ISW == 0:
        FF = -0.5 * (NSUM * (math.log(2 * np.pi * SIG2) + 1) + LDET)
    else:
        FF = -0.5 * (NSUM * (math.log(2 * np.pi) + SIG2) + LDET)
    if OSW == 0:
        return {'LLF':FF, 'Ovar':SIG2}
    if OSW == 1:
        return {'XPS':XPS, 'XFS':XFS, 'VPS':VPS, 'VFS':VFS, 'LLF':FF, 'Ovar':SIG2}
def glr_Uellipse(self, position, U, prob):
    """Renders the ellipsoid enclosing the given fractional probability
    given the gaussian variance-covariance matrix U at the given position.
    C=1.8724 = 68%
    """
    ## rotate U
    R = self.matrix[:3,:3]
    Ur = numpy.matrixmultiply(numpy.matrixmultiply(R, U), numpy.transpose(R))

    Umax = max(linalg.eigenvalues(Ur))
    try:
        limit_radius = Gaussian.GAUSS3C[prob] * MARGIN * math.sqrt(Umax)
    except ValueError:
        limit_radius = 2.0

    try:
        Q = linalg.inverse(Ur)
    except linalg.LinAlgError:
        return

    self.object_list.append(
        (14,
         matrixmultiply43(self.matrix, position),
         limit_radius,
         self.material_color_r,
         self.material_color_g,
         self.material_color_b,
         Q,
         -Gaussian.GAUSS3C[prob]**2))
def __init__(self, a = 1.0, b = 1.0, c = 1.0,
             alpha = 90.0, beta = 90.0, gamma = 90.0,
             space_group = "P1", angle_units = "deg"):

    assert angle_units == "deg" or angle_units == "rad"

    self.a = a
    self.b = b
    self.c = c

    if angle_units == "deg":
        self.alpha = math.radians(alpha)
        self.beta = math.radians(beta)
        self.gamma = math.radians(gamma)
    elif angle_units == "rad":
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma

    self.space_group = SpaceGroups.GetSpaceGroup(space_group)

    self.orth_to_frac = self.calc_fractionalization_matrix()
    self.frac_to_orth = self.calc_orthogonalization_matrix()

    ## check our math!
    assert numpy.allclose(self.orth_to_frac, linalg.inverse(self.frac_to_orth))
def SMO(XPS, XFS, VPS, VFS, F, GSIG2, k, p, q, m, N):
    XSS = np.zeros((N,m),dtype=np.float); VSS = np.zeros((N,m,m),dtype=np.float)
    XS1 = XFS[N-1,:]; VS1 = VFS[N-1,:,:]
    XSS[N-1,:] = XS1; VSS[N-1,:,:] = VS1
    for n1 in xrange(N-1):
        n = (N-1) - n1
        XP = XPS[n,:]; XF = XFS[n-1,:]
        VP = VPS[n,:,:]; VF = VFS[n-1,:,:]
        VPI = inverse(VP)
        A = np.dot( np.dot(VF, F.T), VPI)
        XS2 = XF + np.dot(A, (XS1 - XP))
        VS2 = VF + np.dot( np.dot(A, (VS1 - VP)), A.T )
        XS1 = XS2; VS1 = VS2
        XSS[n-1,:] = XS1; VSS[n-1,:,:] = VS1
    t = np.arange(N, dtype=np.float); s = np.arange(N, dtype=np.float)
    tv = np.arange(N, dtype=np.float); sv = np.arange(N, dtype=np.float)
    if p > 0:
        for n in xrange(N):
            t[n] = XSS[n,0]; s[n] = XSS[n,k]
            tv[n] = GSIG2 * VSS[n,0,0]
            sv[n] = GSIG2 * VSS[n,k,k]
    else:
        for n in xrange(N):
            t[n] = XSS[n,0]; tv[n] = GSIG2 * VSS[n,0,0]
    return {'trd':t, 'sea':s, 'trv':tv ,'sev':sv}
def inverse(self):
    "Returns the inverse of a rank-2 tensor."
    if self.rank == 2:
        from numpy.oldnumeric.linear_algebra import inverse
        return Tensor(inverse(self.array))
    else:
        raise ValueError, 'Undefined operation'
def test_basic_matrix(self):
    """This test is rather monolithic, but at least it implements a
    concrete example that we can compare with our earlier computations.
    It also tests the mutual-inverse character of the equi-to-diag and
    diag-to-equi transformations."""
    tolerance = 5.0e-15
    eig = self.diag.compute_eigen_system(self.h_2, tolerance)
    self.diag.compute_diagonal_change()

    eq_type = eig.get_equilibrium_type()
    self.assertEquals(eq_type, self.eq_type)

    eigs = [pair.val for pair in eig.get_raw_eigen_value_vector_pairs()]
    for actual, expected in zip(eigs, self.eig_vals):
        self.assert_(abs(actual-expected) < tolerance, (actual, expected))

    mat = self.diag.get_matrix_diag_to_equi()
    assert self.diag.matrix_is_symplectic(mat)

    sub_diag_into_equi = self.diag.matrix_as_vector_of_row_polynomials(mat)
    mat_inv = LinearAlgebra.inverse(MLab.array(mat))
    sub_equi_into_diag = self.diag.matrix_as_vector_of_row_polynomials(mat_inv)
    h_diag_2 = self.h_2.substitute(sub_diag_into_equi)
    h_2_inv = h_diag_2.substitute(sub_equi_into_diag)
    self.assert_(h_2_inv)  # non-zero
    self.assert_(not h_2_inv.is_constant())
    self.assert_(self.lie.is_isograde(h_2_inv, 2))
    self.assert_((self.h_2-h_2_inv).l1_norm() < 1.0e-14)

    comp = Complexifier(self.diag.get_lie_algebra(), eq_type)
    sub_complex_into_real = comp.calc_sub_complex_into_real()
    h_comp_2 = h_diag_2.substitute(sub_complex_into_real)
    h_comp_2 = h_comp_2.with_small_coeffs_removed(tolerance)
    self.assert_(self.lie.is_diagonal_polynomial(h_comp_2))
def glr_Uellipse(self, position, U, prob):
    """Renders the ellipsoid enclosing the given fractional probability
    given the gaussian variance-covariance matrix U at the given position.
    C=1.8724 = 68%
    """
    ## rotate U
    R = self.matrix[:3, :3]
    Ur = numpy.dot(numpy.dot(R, U), numpy.transpose(R))

    Umax = max(linalg.eigenvalues(Ur))
    try:
        limit_radius = Gaussian.GAUSS3C[prob] * MARGIN * math.sqrt(Umax)
    except ValueError:
        limit_radius = 2.0

    try:
        Q = linalg.inverse(Ur)
    except linalg.LinAlgError:
        return

    self.object_list.append(
        (
            14,
            dot43(self.matrix, position),
            limit_radius,
            self.material_color_r,
            self.material_color_g,
            self.material_color_b,
            Q,
            -Gaussian.GAUSS3C[prob] ** 2,
        )
    )
def calc_DP2uij(U, V):
    """Calculate the square of the volumetric difference in the
    probability density function of anisotropic ADP tensors U and V.
    """
    invU = linalg.inverse(U)
    invV = linalg.inverse(V)

    det_invU = linalg.determinant(invU)
    det_invV = linalg.determinant(invV)

    Pu2 = math.sqrt( det_invU / (64.0 * Constants.PI3) )
    Pv2 = math.sqrt( det_invV / (64.0 * Constants.PI3) )
    Puv = math.sqrt(
        (det_invU * det_invV) / (8.0*Constants.PI3 * linalg.determinant(invU + invV)))

    dP2 = Pu2 + Pv2 - (2.0 * Puv)

    return dP2
def __init__(self, numPoints, k):
    """numPoints: number of approximation points;
    k: number of basis functions [2,...,numPoints]
    """
    self.numPoints = numPoints
    self.k = k
##    assert k > 1, "Error TrigonomerticBasis: k <= 1"
    assert k <= numPoints, "Error TrigonomerticBasis: k > numPoints"
    # evaluate trigonometric basis functions on the given number of points from [-pi,pi]
    self.x = Numeric.arange(-1*math.pi, math.pi+0.0000001, 2*math.pi/(numPoints-1))
    self.y = Numeric.ones((k, numPoints), Numeric.Float)
    for kk in range(1, k, 2):
##        print "kk, cos %ix" % ((kk+1)/2.)
        self.y[kk] = MLab.cos(self.x*(kk+1)/2)
    for kk in range(2, k, 2):
##        print "kk, sin %ix" % (kk/2.)
        self.y[kk] = MLab.sin(self.x*kk/2)
    # approx. matrix
    self.Ainv = LinearAlgebra.inverse(Numeric.matrixmultiply(self.y, Numeric.transpose(self.y)))
    self.yyTinvy = Numeric.matrixmultiply(
        LinearAlgebra.inverse(Numeric.matrixmultiply(self.y, Numeric.transpose(self.y))),
        self.y)
def calc_DP2uij(U, V):
    """Calculate the square of the volumetric difference in the
    probability density function of anisotropic ADP tensors U and V.
    """
    invU = linalg.inverse(U)
    invV = linalg.inverse(V)

    det_invU = linalg.determinant(invU)
    det_invV = linalg.determinant(invV)

    Pu2 = math.sqrt(det_invU / (64.0 * Constants.PI3))
    Pv2 = math.sqrt(det_invV / (64.0 * Constants.PI3))
    Puv = math.sqrt(
        (det_invU * det_invV) / (8.0 * Constants.PI3 * linalg.determinant(invU + invV)))

    dP2 = Pu2 + Pv2 - (2.0 * Puv)

    return dP2
def KF(y, XF0, VF0, F, H, G, Q, R, limy, ISW, OSW, m, N):
    if OSW == 1:
        XPS = np.zeros((N, m), dtype=np.float)
        XFS = np.zeros((N, m), dtype=np.float)
        VPS = np.zeros((N, m, m), dtype=np.float)
        VFS = np.zeros((N, m, m), dtype=np.float)
    XF = XF0
    VF = VF0
    NSUM = 0.0
    SIG2 = 0.0
    LDET = 0.0
    for n in xrange(N):
        # one-step-ahead prediction
        XP = np.ndarray.flatten(np.dot(F, XF.T))  # XF becomes a column vector from the 2nd pass, so always flatten back to a row vector
        VP = np.dot(np.dot(F, VF), F.T) + np.dot(np.dot(G, Q), G.T)
        # filtering
        # R stays a column vector unless manipulated; beware that Python treats it as a row vector!
        if y[n] < limy:
            NSUM = NSUM + 1
            B = np.dot(np.dot(H, VP), H.T) + R  # H is mathematically a row vector
            B1 = inverse(B)  # nvar-dimensional column vector
            K = np.matrix(np.dot(VP, H.T)) * np.matrix(B1)  # K becomes a column vector (matrix)
            e = np.array(y[n]).T - np.dot(H, XP.T)  # nvar-dimensional column vector
            XF = np.array(XP) + np.array(K * np.matrix(e)).T  # row vector
            VF = np.array(VP) - np.array(K * np.matrix(H) * VP)
            SIG2 = SIG2 + np.ndarray.flatten(
                np.array(np.matrix(e) * np.matrix(B1) * np.matrix(e).T))[0]  # use matrix so the 1-D case also works
            LDET = LDET + math.log(linalg.det(B))
        else:
            XF = XP
            VF = VP
        if OSW == 1:
            XPS[n, :] = XP
            XFS[n, :] = XF
            VPS[n, :, :] = VP
            VFS[n, :, :] = VF
    SIG2 = SIG2 / NSUM
    if ISW == 0:
        FF = -0.5 * (NSUM * (math.log(2 * np.pi * SIG2) + 1) + LDET)
    else:
        FF = -0.5 * (NSUM * (math.log(2 * np.pi) + SIG2) + LDET)
    if OSW == 0:
        return {'LLF': FF, 'Ovar': SIG2}
    if OSW == 1:
        return {
            'XPS': XPS,
            'XFS': XFS,
            'VPS': VPS,
            'VFS': VFS,
            'LLF': FF,
            'Ovar': SIG2
        }
def interpolate3DTransform(matrixList, indexList, percent):
    """
    This function takes two lists and a percent value as input and returns a
    4x4 matrix corresponding to percent% of the transformation.

    matrixList: a list of 4x4 transformation matrices
    indexList : a list of sorted indices (positive float numbers)
    percent   : a positive float number.

    If there is only one matrix in the matrix list:
        percent = 0.0 means no transformation (identity)
                  1.0 means 100% of the transformation (returns mat)
                  0.58 means 58% of the translation and 58% of the rotation
                  angle along the same rotation axis
        percent can go above 1.0

    If matrixList has more than one matrix:
        matrixList = [M1, M2, M3]    # Attention: all M use the same reference frame
        indexList  = [0.2, 0.5, 1.0] # Attention: the list is assumed sorted ascendingly
        p = 0.5 means apply M2
        p = 0.8 means apply M3
        p = 0.9 means apply M2 first, then apply 50% of M'.  M' is the
                transformation from M2 to M3.  50% = (0.9-0.8) / (1.0-0.8)
                M2 x M' = M3  -->  M2.inverse x M2 x M' = M2.inverse x M3
                              -->  M' = M2.inverse x M3
    """
    listLen = len(matrixList)
    if listLen != len(indexList):
        raise ValueError("matrix list should have the same length as index list")
    if listLen == 0:
        raise ValueError("no matrix found in the matrix list")

    offset = -1
    for i in range(listLen):
        if indexList[i] >= percent:
            offset = i
            break

    prevMat = nextMat = N.identity(4,'f')
    if offset == -1:
        prevMat = matrixList[-1]
        p = percent/indexList[-1]
        return _interpolateMat(matrixList[-1], p)
    elif offset == 0:
        nextMat = matrixList[0]
        p = percent/indexList[0]
        return _interpolateMat(N.array(matrixList[0]), p)
    else:
        prevMat = matrixList[offset-1]
        nextMat = matrixList[offset]
        p = (percent-indexList[offset-1])/(indexList[offset]-indexList[offset-1])
        from numpy.oldnumeric.linear_algebra import inverse
        M = N.dot(inverse(prevMat), nextMat)
        Mat = _interpolateMat(M, p)
        return N.dot(prevMat, Mat)
def interpolate3DTransform(matrixList, indexList, percent):
    """
    This function takes two lists and a percent value as input and returns a
    4x4 matrix corresponding to percent% of the transformation.

    matrixList: a list of 4x4 transformation matrices
    indexList : a list of sorted indices (positive float numbers)
    percent   : a positive float number.

    If there is only one matrix in the matrix list:
        percent = 0.0 means no transformation (identity)
                  1.0 means 100% of the transformation (returns mat)
                  0.58 means 58% of the translation and 58% of the rotation
                  angle along the same rotation axis
        percent can go above 1.0

    If matrixList has more than one matrix:
        matrixList = [M1, M2, M3]    # Attention: all M use the same reference frame
        indexList  = [0.2, 0.5, 1.0] # Attention: the list is assumed sorted ascendingly
        p = 0.5 means apply M2
        p = 0.8 means apply M3
        p = 0.9 means apply M2 first, then apply 50% of M'.  M' is the
                transformation from M2 to M3.  50% = (0.9-0.8) / (1.0-0.8)
                M2 x M' = M3  -->  M2.inverse x M2 x M' = M2.inverse x M3
                              -->  M' = M2.inverse x M3
    """
    listLen = len(matrixList)
    if listLen != len(indexList):
        raise ValueError("matrix list should have the same length as index list")
    if listLen == 0:
        raise ValueError("no matrix found in the matrix list")

    offset = -1
    for i in range(listLen):
        if indexList[i] >= percent:
            offset = i
            break

    prevMat = nextMat = N.identity(4, 'f')
    if offset == -1:
        prevMat = matrixList[-1]
        p = percent / indexList[-1]
        return _interpolateMat(matrixList[-1], p)
    elif offset == 0:
        nextMat = matrixList[0]
        p = percent / indexList[0]
        return _interpolateMat(N.array(matrixList[0]), p)
    else:
        prevMat = matrixList[offset - 1]
        nextMat = matrixList[offset]
        p = (percent - indexList[offset - 1]) / (indexList[offset] - indexList[offset - 1])
        from numpy.oldnumeric.linear_algebra import inverse
        M = N.dot(inverse(prevMat), nextMat)
        Mat = _interpolateMat(M, p)
        return N.dot(prevMat, Mat)
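# A small illustrative check (not part of the original module) of the relative
# transform derivation in the docstring above: from M2 x M' = M3 it follows that
# M' = inv(M2) @ M3, so composing M2 with M' recovers M3.  The matrices here are
# hypothetical; numpy.linalg.inv stands in for the deprecated oldnumeric inverse.
import numpy as np

M2 = np.array([[0.0, -1.0, 0.0, 1.0],
               [1.0,  0.0, 0.0, 2.0],
               [0.0,  0.0, 1.0, 0.0],
               [0.0,  0.0, 0.0, 1.0]])   # 90-degree rotation about z plus a translation
M3 = np.array([[-1.0,  0.0, 0.0, 3.0],
               [ 0.0, -1.0, 0.0, 2.0],
               [ 0.0,  0.0, 1.0, 1.0],
               [ 0.0,  0.0, 0.0, 1.0]])  # 180-degree rotation about z plus a translation

M_prime = np.dot(np.linalg.inv(M2), M3)  # transformation that takes M2 to M3
assert np.allclose(np.dot(M2, M_prime), M3)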
def filtering(self, y, XP, VP):
    if y < self.limy:
        B = np.dot( np.dot(self.H, VP), self.H.T) + self.R  # H is mathematically a row vector
        B1 = inverse(B)
        K = np.matrix(np.dot(VP, self.H.T)) * np.matrix(B1)  # K becomes a column vector (matrix)
        e = np.array(y).T - np.dot(self.H, XP.T)
        XF = np.array(XP) + np.array( K * np.matrix(e) ).T  # row vector
        VF = np.array(VP) - np.array( K * np.matrix(self.H) * VP)
        self.SIG2 += np.ndarray.flatten(np.array( np.matrix(e) * np.matrix(B1) * np.matrix(e).T ))[0]  # use matrix so the 1-D case also works
        self.LDET += log(linalg.det(B))
    else:
        XF = XP; VF = VP
    return XF, VF
def SMO(XPS, XFS, VPS, VFS, F, GSIG2, k, p, q, m, N):
    XSS = np.zeros((N,m),dtype=np.float); VSS = np.zeros((N,m,m),dtype=np.float)
    XS1 = XFS[N-1,:]; VS1 = VFS[N-1,:,:]
    XSS[N-1,:] = XS1; VSS[N-1,:,:] = VS1
    for n1 in xrange(N-1):
        n = (N-1) - n1
        XP = XPS[n,:]; XF = XFS[n-1,:]
        VP = VPS[n,:,:]; VF = VFS[n-1,:,:]
        VPI = inverse(VP)
        A = np.dot( np.dot(VF, F.T), VPI)
        XS2 = XF + np.dot(A, (XS1 - XP))
        VS2 = VF + np.dot( np.dot(A, (VS1 - VP)), A.T )
        XS1 = XS2; VS1 = VS2
        XSS[n-1,:] = XS1; VSS[n-1,:,:] = VS1
    return {'XSS':XSS, 'VSS':VSS}
def __init__(self, numPoints, k):
    """numPoints: number of approximation points;
    k: number of basis functions [2,...,numPoints]
    """
    self.numPoints = numPoints
    self.k = k
##    assert k > 1, "Error TrigonomerticBasis: k <= 1"
    assert k <= numPoints, "Error TrigonomerticBasis: k > numPoints"
    # evaluate trigonometric basis functions on the given number of points from [-pi,pi]
    self.x = Numeric.arange(-1 * math.pi, math.pi + 0.0000001,
                            2 * math.pi / (numPoints - 1))
    self.y = Numeric.ones((k, numPoints), Numeric.Float)
    for kk in range(1, k, 2):
##        print "kk, cos %ix" % ((kk+1)/2.)
        self.y[kk] = MLab.cos(self.x * (kk + 1) / 2)
    for kk in range(2, k, 2):
##        print "kk, sin %ix" % (kk/2.)
        self.y[kk] = MLab.sin(self.x * kk / 2)
    # approx. matrix
    self.Ainv = LinearAlgebra.inverse(
        Numeric.matrixmultiply(self.y, Numeric.transpose(self.y)))
    self.yyTinvy = Numeric.matrixmultiply(
        LinearAlgebra.inverse(
            Numeric.matrixmultiply(self.y, Numeric.transpose(self.y))),
        self.y)
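# The two products above form the normal-equations pseudo-inverse: for basis
# rows stacked in y (shape k x numPoints), approximation coefficients are
# obtained as coef = inv(y y.T) y data, which matches an ordinary least-squares
# solve.  A small check with modern numpy and a hypothetical data vector:
import numpy as np

numPoints, k = 21, 5
x = np.linspace(-np.pi, np.pi, numPoints)
y = np.ones((k, numPoints))
for kk in range(1, k, 2):
    y[kk] = np.cos(x * (kk + 1) / 2)
for kk in range(2, k, 2):
    y[kk] = np.sin(x * kk / 2)

data = np.sin(x) + 0.5 * np.cos(2 * x)              # hypothetical curve to approximate
yyTinvy = np.dot(np.linalg.inv(np.dot(y, y.T)), y)  # same construction as above
coef = np.dot(yyTinvy, data)
coef_lstsq = np.linalg.lstsq(y.T, data, rcond=None)[0]
assert np.allclose(coef, coef_lstsq)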
def state_equation(x, t0, u):
    '''
    Defined by Jeffsan Wang
    The state equation, dx/dt = f(x,t0,u)
    Computes the derivative of the state at time t0 given the input u.
      x[0:3] --> Position in NED frame
      x[3:6] --> Euler angles of the body frame expressed in the inertial frame
      x[6:9] --> Velocity in the aircraft body frame
      x[9]   --> Bias in the yaw direction of the body frame
      u[0:3] --> Acceleration in the body frame
      u[3:6] --> Angular rate of the body frame expressed in the inertial frame
    '''
    [pos, eul, vel, bias] = [x[0:3], x[3:6], x[6:9], x[9]]
    [ax, ay, az, wx, wy, wz] = [u[0], u[1], u[2], u[3], u[4], u[5]]
    [phi, theta, psi] = [eul[0], eul[1], eul[2]]
    [vx, vy, vz] = [vel[0], vel[1], vel[2]]

    # position transition
    [cp, sp, ct, st, cs, ss] = [cos(phi), sin(phi), cos(theta), sin(theta), cos(psi), sin(psi)]
    T = array([[ct * cs, ct * ss, -st],
               [sp * st * cs - cp * ss, sp * st * ss + cp * cs, sp * ct],
               [cp * st * cs + sp * ss, cp * st * ss - sp * cs, cp * ct]])
    dev_pos = dot(inverse(T), vel)

    # euler angle transition
    tt = tan(theta)
    R = array([[1, sp * tt, cp * tt],
               [0, cp, -sp],
               [0, sp / ct, cp / ct]])
    #print "R",R
    dev_euler = dot(R, [wx, wy, wz])
    #print 'dev_euler', dev_euler

    # velocity transition
    dev_vx = ax - g * st - wy * vz + wz * vy
    dev_vy = ay + g * ct * sp - wz * vx + wx * vz
    dev_vz = az + g * ct * cp - wx * vy + wy * vx
    dev_vel = [dev_vx, dev_vy, dev_vz]

    # yaw bias transition
    dev_bias = 0

    # merge state transition
    dev_x = hstack((dev_pos, dev_euler, dev_vel, dev_bias))
    #print 'dev_x ', dev_x
    return dev_x
def SMO(self):
    """fixed-interval smoothing"""
    XS1 = self.XFS[self.term-1]
    VS1 = self.VFS[self.term-1]
    self.XSS[self.term-1] = XS1
    self.VSS[self.term-1] = VS1
    for n1 in xrange(self.term):
        n = (self.term-1) - n1
        XP = self.XPS[n]; XF = self.XFS[n-1]
        VP = self.VPS[n]; VF = self.VFS[n-1]
        VPI = inverse(VP)
        A = np.dot( np.dot(VF, self.F.T), VPI)
        XS2 = XF + np.dot(A, (XS1 - XP))
        VS2 = VF + np.dot( np.dot(A, (VS1 - VP)), A.T )
        XS1 = XS2; VS1 = VS2
        self.XSS[n-1] = XS1
        self.VSS[n-1] = VS1
def state_equation(x, t0, u):
    '''
    Defined by Jeffsan Wang
    The state equation, dx/dt = f(x,t0,u)
    Computes the derivative of the state at time t0 given the input u.
      x[0:3] --> Position in NED frame
      x[3:6] --> Euler angles of the body frame expressed in the inertial frame
      x[6:9] --> Velocity in the aircraft body frame
      x[9]   --> Bias in the yaw direction of the body frame
      u[0:3] --> Acceleration in the body frame
      u[3:6] --> Angular rate of the body frame expressed in the inertial frame
    '''
    [pos, eul, vel, bias] = [x[0:3], x[3:6], x[6:9], x[9]]
    [ax, ay, az, wx, wy, wz] = [u[0], u[1], u[2], u[3], u[4], u[5]]
    [phi, theta, psi] = [eul[0], eul[1], eul[2]]
    [vx, vy, vz] = [vel[0], vel[1], vel[2]]

    # position transition
    [cp, sp, ct, st, cs, ss] = [cos(phi), sin(phi), cos(theta), sin(theta), cos(psi), sin(psi)]
    T = array([[ ct*cs,             ct*ss,             -st  ],
               [sp*st*cs - cp*ss,  sp*st*ss + cp*cs,  sp*ct],
               [cp*st*cs + sp*ss,  cp*st*ss - sp*cs,  cp*ct]])
    dev_pos = dot(inverse(T), vel)

    # euler angle transition
    tt = tan(theta)
    R = array([[1, sp * tt, cp * tt],
               [0, cp,      -sp    ],
               [0, sp / ct, cp / ct]])
    #print "R",R
    dev_euler = dot(R, [wx, wy, wz])
    #print 'dev_euler', dev_euler

    # velocity transition
    dev_vx = ax - g * st - wy * vz + wz * vy
    dev_vy = ay + g * ct * sp - wz * vx + wx * vz
    dev_vz = az + g * ct * cp - wx * vy + wy * vx
    dev_vel = [dev_vx, dev_vy, dev_vz]

    # yaw bias transition
    dev_bias = 0

    # merge state transition
    dev_x = hstack((dev_pos, dev_euler, dev_vel, dev_bias))
    #print 'dev_x ', dev_x
    return dev_x
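# An aside on the inverse(T) call above: T is a direction-cosine matrix built
# from Euler angles, hence orthonormal, so its inverse equals its transpose.
# A minimal numerical check with made-up angles (numpy.linalg.inv stands in for
# the older inverse helper used in the snippet):
import numpy as np
from numpy import cos, sin

phi, theta, psi = 0.1, -0.2, 0.3  # hypothetical roll, pitch, yaw
cp, sp, ct, st, cs, ss = cos(phi), sin(phi), cos(theta), sin(theta), cos(psi), sin(psi)
T = np.array([[ct * cs,                ct * ss,                -st],
              [sp * st * cs - cp * ss, sp * st * ss + cp * cs, sp * ct],
              [cp * st * cs + sp * ss, cp * st * ss - sp * cs, cp * ct]])
assert np.allclose(np.linalg.inv(T), T.T)  # orthonormal: inverse == transpose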
def _gaussian(self, mean, cvm, x):
    m = len(mean)
    assert cvm.shape == (m, m), \
        'bad sized covariance matrix, %s' % str(cvm.shape)
    try:
        det = LinearAlgebra.determinant(cvm)
        inv = LinearAlgebra.inverse(cvm)
        a = det ** -0.5 * (2 * Numeric.pi) ** (-m / 2.0)
        dx = x - mean
        b = -0.5 * Numeric.matrixmultiply( \
                Numeric.matrixmultiply(dx, inv), dx)
        return a * Numeric.exp(b)
    except OverflowError:
        # happens when the exponent is negative infinity - i.e. b = 0
        # i.e. the inverse of cvm is huge (cvm is almost zero)
        return 0
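# A compact modern-numpy sketch of the same multivariate Gaussian density
# (numpy.linalg replaces the old LinearAlgebra module and matrixmultiply becomes
# numpy.dot); the mean and covariance below are made up for illustration.
import numpy as np

def gaussian_pdf(mean, cvm, x):
    m = len(mean)
    det = np.linalg.det(cvm)
    inv = np.linalg.inv(cvm)
    dx = x - mean
    norm = det ** -0.5 * (2 * np.pi) ** (-m / 2.0)
    return norm * np.exp(-0.5 * np.dot(np.dot(dx, inv), dx))

mean = np.array([0.0, 0.0])
cvm = np.array([[1.0, 0.2], [0.2, 2.0]])   # hypothetical covariance matrix
print(gaussian_pdf(mean, cvm, np.array([0.5, -0.3])))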
def filtering(self, y, XP, VP):
    if y < self.limy:
        B = np.dot(np.dot(self.H, VP), self.H.T) + self.R  # H is mathematically a row vector
        B1 = inverse(B)
        K = np.matrix(np.dot(VP, self.H.T)) * np.matrix(B1)  # K becomes a column vector (matrix)
        e = np.array(y).T - np.dot(self.H, XP.T)
        XF = np.array(XP) + np.array(K * np.matrix(e)).T  # row vector
        VF = np.array(VP) - np.array(K * np.matrix(self.H) * VP)
        self.SIG2 += np.ndarray.flatten(
            np.array(np.matrix(e) * np.matrix(B1) * np.matrix(e).T))[0]  # use matrix so the 1-D case also works
        self.LDET += log(linalg.det(B))
    else:
        XF = XP
        VF = VP
    return XF, VF
def setNormalization(self, normalization):
    if normalization == OrthPolyBasis.NORM_NONE:
        self.T = self._T0
    elif normalization == OrthPolyBasis.NORM_NORM:
        self.T = self._T1
    elif normalization == OrthPolyBasis.NORM_NORM_T0_1:
        self.T = self._T2
    elif normalization == OrthPolyBasis.NORM_END1:
        self.T = self._T3
    else:
        raise ValueError, "Error: unknown normalization: " + str(normalization)
    self.TT = Numeric.matrixmultiply(self.T, Numeric.transpose(self.T))
    self.TTinv = LinearAlgebra.inverse(self.TT)
    self.TTinvT = Numeric.matrixmultiply(self.TTinv, self.T)
    self.basisCoef = self._getBasisCoef(self.x, self.T)
    self._normalization = normalization
    self._checkOrth(self.T, self.TT, output=self._force)
def setNormalization(self, normalization):
    if normalization == OrthPolyBasis.NORM_NONE:
        self.T = self._T0
    elif normalization == OrthPolyBasis.NORM_NORM:
        self.T = self._T1
    elif normalization == OrthPolyBasis.NORM_NORM_T0_1:
        self.T = self._T2
    elif normalization == OrthPolyBasis.NORM_END1:
        self.T = self._T3
    else:
        raise ValueError, "Error: unknown normalization: " + str(normalization)
    self.TT = Numeric.matrixmultiply(self.T, Numeric.transpose(self.T))
    self.TTinv = LinearAlgebra.inverse(self.TT)
    self.TTinvT = Numeric.matrixmultiply(self.TTinv, self.T)
    self.basisCoef = self._getBasisCoef(self.x, self.T)
    self._normalization = normalization
    self._checkOrth(self.T, self.TT, output = self._force)
def get_matrix_equi_to_diag(self):
    """
    The matrix which maps a vector in the real equilibrium coordinate
    system into the corresponding vector in the real diagonal coordinate
    system.

    One can take a polynomial expression in terms of the real diagonal
    coordinates and convert it into an expression in the real equilibrium
    coordinates by:-

    1. express the matrix as a vector of linear row-polynomials, denoted
       diag_in_equi, i.e., diagonal coordinates in terms of equilibrium ones.
    2. poly_in_equi = poly_in_diag.substitute(diag_in_equi).
    """
    return inverse(self.matrix_diag_to_equi)
def __call__(self, inMatrices=None, applyIndex=None):
    """outMatrices <- SymInverse(inMatrices, applyIndex=None)
    inMatrices:  list of 4x4 matrices
    outMatrices: list of 4x4 matrices
    """
    import numpy.oldnumeric.linear_algebra as LinearAlgebra
    if not inMatrices:
        inMatrices = [Numeric.identity(4).astype('f')]
    matrices = Numeric.array(inMatrices)
    assert matrices.shape[-2] == 4 and matrices.shape[-1] == 4

    out = []
    for im in matrices:  # loop over node's incoming matrices
        out.append(LinearAlgebra.inverse(im))
    return out
def SMO(self):
    """fixed-interval smoothing"""
    XS1 = self.XFS[self.term - 1]
    VS1 = self.VFS[self.term - 1]
    self.XSS[self.term - 1] = XS1
    self.VSS[self.term - 1] = VS1
    for n1 in xrange(self.term):
        n = (self.term - 1) - n1
        XP = self.XPS[n]
        XF = self.XFS[n - 1]
        VP = self.VPS[n]
        VF = self.VFS[n - 1]
        VPI = inverse(VP)
        A = np.dot(np.dot(VF, self.F.T), VPI)
        XS2 = XF + np.dot(A, (XS1 - XP))
        VS2 = VF + np.dot(np.dot(A, (VS1 - VP)), A.T)
        XS1 = XS2
        VS1 = VS2
        self.XSS[n - 1] = XS1
        self.VSS[n - 1] = VS1
def SMO(XPS, XFS, VPS, VFS, F, GSIG2, k, p, q, m, N):
    XSS = np.zeros((N, m), dtype=np.float)
    VSS = np.zeros((N, m, m), dtype=np.float)
    XS1 = XFS[N - 1, :]
    VS1 = VFS[N - 1, :, :]
    XSS[N - 1, :] = XS1
    VSS[N - 1, :, :] = VS1
    for n1 in xrange(N - 1):
        n = (N - 1) - n1
        XP = XPS[n, :]
        XF = XFS[n - 1, :]
        VP = VPS[n, :, :]
        VF = VFS[n - 1, :, :]
        VPI = inverse(VP)
        A = np.dot(np.dot(VF, F.T), VPI)
        XS2 = XF + np.dot(A, (XS1 - XP))
        VS2 = VF + np.dot(np.dot(A, (VS1 - VP)), A.T)
        XS1 = XS2
        VS1 = VS2
        XSS[n - 1, :] = XS1
        VSS[n - 1, :, :] = VS1
    t = np.arange(N, dtype=np.float)
    s = np.arange(N, dtype=np.float)
    tv = np.arange(N, dtype=np.float)
    sv = np.arange(N, dtype=np.float)
    if p > 0:
        for n in xrange(N):
            t[n] = XSS[n, 0]
            s[n] = XSS[n, k]
            tv[n] = GSIG2 * VSS[n, 0, 0]
            sv[n] = GSIG2 * VSS[n, k, k]
    else:
        for n in xrange(N):
            t[n] = XSS[n, 0]
            tv[n] = GSIG2 * VSS[n, 0, 0]
    return {'trd': t, 'sea': s, 'trv': tv, 'sev': sv}
def SMO(XPS, XFS, VPS, VFS, F, GSIG2, k, p, q, m, N):
    XSS = np.zeros((N, m), dtype=np.float)
    VSS = np.zeros((N, m, m), dtype=np.float)
    XS1 = XFS[N - 1, :]
    VS1 = VFS[N - 1, :, :]
    XSS[N - 1, :] = XS1
    VSS[N - 1, :, :] = VS1
    for n1 in xrange(N - 1):
        n = (N - 1) - n1
        XP = XPS[n, :]
        XF = XFS[n - 1, :]
        VP = VPS[n, :, :]
        VF = VFS[n - 1, :, :]
        VPI = inverse(VP)
        A = np.dot(np.dot(VF, F.T), VPI)
        XS2 = XF + np.dot(A, (XS1 - XP))
        VS2 = VF + np.dot(np.dot(A, (VS1 - VP)), A.T)
        XS1 = XS2
        VS1 = VS2
        XSS[n - 1, :] = XS1
        VSS[n - 1, :, :] = VS1
    return {'XSS': XSS, 'VSS': VSS}
def interpolate3DTransform1(matrixList, indexList, percent):
    # MS version that does not assume identity as the first matrix and does
    # not wrap around
    if percent <= indexList[0]:
        return matrixList[0]

    if percent >= indexList[-1]:
        return matrixList[-1]

    listLen = len(indexList)
    for i in range(listLen):
        if indexList[i] > percent:
            break

    prevMat = matrixList[i-1]
    nextMat = matrixList[i]

    from numpy.oldnumeric.linear_algebra import inverse
    M = N.dot(inverse(prevMat), nextMat)
    p = (percent-indexList[i-1]) / (indexList[i]-indexList[i-1])
    Mat = _interpolateMat(M, p)
    return N.dot(prevMat, Mat)
def interpolate3DTransform1(matrixList, indexList, percent):
    # MS version that does not assume identity as the first matrix and does
    # not wrap around
    if percent <= indexList[0]:
        return matrixList[0]

    if percent >= indexList[-1]:
        return matrixList[-1]

    listLen = len(indexList)
    for i in range(listLen):
        if indexList[i] > percent:
            break

    prevMat = matrixList[i - 1]
    nextMat = matrixList[i]

    from numpy.oldnumeric.linear_algebra import inverse
    M = N.dot(inverse(prevMat), nextMat)
    p = (percent - indexList[i - 1]) / (indexList[i] - indexList[i - 1])
    Mat = _interpolateMat(M, p)
    return N.dot(prevMat, Mat)
def sartran(self, rho, x, force=0, precis=DELTA):
    n = len(x)
    listflag = 0
    if type(x) == list:
        x = Numeric.array(x, Numeric.Float)
        listflag = 1
    sarx = Numeric.zeros(n, Numeric.Float)
    if n > WT_SMALL or force:
        sarx = x
        wx = self.splag(x) * rho
        sarx += wx
        while max(wx) > precis:
            wx = self.splag(wx) * rho
            sarx += wx
    else:  # small weights: full matrix inverse
        w = self.wt2mat()
        w *= -rho
        w += Numeric.identity(n)
        wx = LinearAlgebra.inverse(w)
        sarx = Numeric.matrixmultiply(wx, x)
    if listflag:
        return sarx.tolist()
    else:
        return sarx
def sartran(self,rho,x,force=0,precis=DELTA):
    n = len(x)
    listflag = 0
    if type(x) == list:
        x = Numeric.array(x,Numeric.Float)
        listflag = 1
    sarx = Numeric.zeros(n,Numeric.Float)
    if n > WT_SMALL or force:
        sarx = x
        wx = self.splag(x) * rho
        sarx += wx
        while max(wx) > precis:
            wx = self.splag(wx) * rho
            sarx += wx
    else:  # small weights: full matrix inverse
        w = self.wt2mat()
        w *= - rho
        w += Numeric.identity(n)
        wx = LinearAlgebra.inverse(w)
        sarx = Numeric.matrixmultiply(wx,x)
    if listflag:
        return sarx.tolist()
    else:
        return sarx
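# The spatial-lag loop and the matrix-inverse branch above compute the same
# spatial autoregressive transform: (I - rho W)^{-1} x equals the Neumann series
# x + rho W x + rho^2 W^2 x + ... when the series converges.  A small check with
# a hypothetical row-standardized weight matrix W and modern numpy:
import numpy as np

rho = 0.4
W = np.array([[0.0, 0.5, 0.5],
              [0.5, 0.0, 0.5],
              [0.5, 0.5, 0.0]])          # hypothetical spatial weights
x = np.array([1.0, 2.0, 3.0])

direct = np.dot(np.linalg.inv(np.identity(3) - rho * W), x)

series = x.copy()
wx = rho * np.dot(W, x)
while np.max(np.abs(wx)) > 1e-12:
    series += wx
    wx = rho * np.dot(W, wx)

assert np.allclose(direct, series)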
def test_basic_matrix(self):
    """This test is rather monolithic, but at least it implements a
    concrete example that we can compare with our earlier computations.
    It also tests the mutual-inverse character of the equi-to-diag and
    diag-to-equi transformations."""
    tolerance = 5.0e-15
    eig = self.diag.compute_eigen_system(self.h_2, tolerance)
    self.diag.compute_diagonal_change()

    eq_type = eig.get_equilibrium_type()
    self.assertEquals(eq_type, self.eq_type)

    eigs = [pair.val for pair in eig.get_raw_eigen_value_vector_pairs()]
    for actual, expected in zip(eigs, self.eig_vals):
        self.assert_(
            abs(actual - expected) < tolerance, (actual, expected))

    mat = self.diag.get_matrix_diag_to_equi()
    assert self.diag.matrix_is_symplectic(mat)

    sub_diag_into_equi = self.diag.matrix_as_vector_of_row_polynomials(mat)
    mat_inv = LinearAlgebra.inverse(MLab.array(mat))
    sub_equi_into_diag = self.diag.matrix_as_vector_of_row_polynomials(
        mat_inv)
    h_diag_2 = self.h_2.substitute(sub_diag_into_equi)
    h_2_inv = h_diag_2.substitute(sub_equi_into_diag)
    self.assert_(h_2_inv)  # non-zero
    self.assert_(not h_2_inv.is_constant())
    self.assert_(self.lie.is_isograde(h_2_inv, 2))
    self.assert_((self.h_2 - h_2_inv).l1_norm() < 1.0e-14)

    comp = Complexifier(self.diag.get_lie_algebra(), eq_type)
    sub_complex_into_real = comp.calc_sub_complex_into_real()
    h_comp_2 = h_diag_2.substitute(sub_complex_into_real)
    h_comp_2 = h_comp_2.with_small_coeffs_removed(tolerance)
    self.assert_(self.lie.is_diagonal_polynomial(h_comp_2))
def LinearLeastSquaresFit(model0, parameters0, data0, maxiter, constrains0,
                          weightflag, model_deriv=None, deltachi=0.01,
                          fulloutput=0, xdata=None, ydata=None, sigmadata=None):
    # get the codes:
    # 0 = Free   1 = Positive   2 = Quoted
    # 3 = Fixed  4 = Factor     5 = Delta
    # 6 = Sum    7 = ignored
    constrains = [[], [], []]
    if len(constrains0) == 0:
        for i in range(len(parameters0)):
            constrains[0].append(0)
            constrains[1].append(0)
            constrains[2].append(0)
    else:
        for i in range(len(parameters0)):
            constrains[0].append(constrains0[0][i])
            constrains[1].append(constrains0[1][i])
            constrains[2].append(constrains0[2][i])
    for i in range(len(parameters0)):
        if type(constrains[0][i]) == type('string'):
            # get the number
            if constrains[0][i] == "FREE":
                constrains[0][i] = CFREE
            elif constrains[0][i] == "POSITIVE":
                constrains[0][i] = CPOSITIVE
            elif constrains[0][i] == "QUOTED":
                constrains[0][i] = CQUOTED
            elif constrains[0][i] == "FIXED":
                constrains[0][i] = CFIXED
            elif constrains[0][i] == "FACTOR":
                constrains[0][i] = CFACTOR
                constrains[1][i] = int(constrains[1][i])
            elif constrains[0][i] == "DELTA":
                constrains[0][i] = CDELTA
                constrains[1][i] = int(constrains[1][i])
            elif constrains[0][i] == "SUM":
                constrains[0][i] = CSUM
                constrains[1][i] = int(constrains[1][i])
            elif constrains[0][i] == "IGNORED":
                constrains[0][i] = CIGNORED
            elif constrains[0][i] == "IGNORE":
                constrains[0][i] = CIGNORED
            else:
                # I should raise an exception
                #constrains[0][i] = 0
                raise ValueError, "Unknown constraint %s" % constrains[0][i]
        if constrains[0][i] == CQUOTED:
            raise ValueError, "Linear fit cannot handle quoted constraint"
    # make a local copy of the function for an easy speed up ...
    model = model0
    parameters = array(parameters0)
    if data0 is not None:
        selfx = array(map(lambda x: x[0], data0))
        selfy = array(map(lambda x: x[1], data0))
    else:
        selfx = xdata
        selfy = ydata
    selfweight = ones(selfy.shape, Float)
    nr0 = len(selfy)
    if data0 is not None:
        nc = len(data0[0])
    else:
        if sigmadata is None:
            nc = 2
        else:
            nc = 3
    if weightflag == 1:
        if nc == 3:
            #dummy = abs(data[0:nr0:inc,2])
            if data0 is not None:
                dummy = abs(array(map(lambda x: x[2], data0)))
            else:
                dummy = abs(array(sigmadata))
            selfweight = 1.0 / (dummy + equal(dummy, 0))
            selfweight = selfweight * selfweight
        else:
            selfweight = 1.0 / (abs(selfy) + equal(abs(selfy), 0))
    n_param = len(parameters)
    # linear fit: use at your own risk, since there is no check that the
    # function is actually linear in its parameters.
    # Only the fixed constraints are handled properly.
    x = selfx
    y = selfy
    weight = selfweight
    iter = maxiter
    niter = 0
    newpar = parameters.__copy__()
    while (iter > 0):
        niter += 1
        chisq0, alpha0, beta, \
            n_free, free_index, noigno, fitparam, derivfactor = ChisqAlphaBeta(
                model, newpar, x, y, weight, constrains,
                model_deriv=model_deriv, linear=1)
        print "A", chisq0
        nr, nc = alpha0.shape
        fittedpar = dot(beta, inverse(alpha0))
        # check that constraints are respected (only POSITIVE is handled:
        # force the parameter to 0 and fix it)
        error = 0
        for i in range(n_free):
            if constrains[0][free_index[i]] == CPOSITIVE:
                if fittedpar[0, i] < 0:
                    # fix parameter to 0.0 and re-start the fit
                    newpar[free_index[i]] = 0.0
                    constrains[0][free_index[i]] = CFIXED
                    error = 1
        if error:
            continue
        for i in range(n_free):
            newpar[free_index[i]] = fittedpar[0, i]
        newpar = array(getparameters(newpar, constrains))
        iter = -1
    yfit = model(newpar, x)
    chisq = sum(weight * (y - yfit) * (y - yfit))
    sigma0 = sqrt(abs(diagonal(inverse(alpha0))))
    sigmapar = getsigmaparameters(newpar, sigma0, constrains)
    lastdeltachi = chisq
    if not fulloutput:
        return newpar.tolist(), chisq / (len(y) - len(sigma0)), sigmapar.tolist()
    else:
        return newpar.tolist(), chisq / (len(y) - len(sigma0)), \
               sigmapar.tolist(), niter, lastdeltachi
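# The core of the linear branch above is a single normal-equations solve:
# alpha = A.T W A, beta = y.T W A, fitted = beta @ inv(alpha).  A tiny
# self-contained illustration for a straight-line model with hypothetical data,
# using numpy in place of the Numeric-era helpers:
import numpy as np

xdata = np.array([0.0, 1.0, 2.0, 3.0])
ydata = np.array([1.0, 3.1, 4.9, 7.2])             # roughly 1 + 2*x
A = np.column_stack([np.ones_like(xdata), xdata])  # derivatives w.r.t. [intercept, slope]
w = np.ones_like(ydata)                            # unit weights

alpha = np.dot(A.T * w, A)
beta = np.dot(ydata * w, A)
fitted = np.dot(beta, np.linalg.inv(alpha))
print(fitted)                                      # close to [0.99, 2.04]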
def NMkernel(sita, H, D, Vsita):
    res = sita - np.dot(H.T, D)
    return np.dot( np.dot(res.T, inverse(Vsita)), res )
# step4--calculate_spending_habits_param_lambd=Sita_lmbd
pool = Pool(processes=4)
tmp = np.array(
    pool.map(calculate_spending_habits_param_lambd,
             ((hh, u, Xs, Sita_lmbd, Hlmbd, Vsita_lmbd, Ztld) for hh in xrange(nhh))))
pool.close()
pool.join()
Sita_lmbd = tmp[:, 0]
rej_lmbd += tmp[:, 1]
### step5--------------------------------------
## dlt-side computation ----
# compute the parameters of the multivariate normal distribution
D2 = np.dot(D.T, D)
D2pA0 = D2 + A0
Hhat_dlt = np.dot(np.dot(inverse(D2), D.T), Sita_dlt)
Dtld = np.dot(
    inverse(D2pA0),
    (np.dot(D2, Hhat_dlt) + np.dot(A0, np.ndarray.flatten(m0))))
rtld = np.ndarray.flatten(Dtld)
sig = np.array(np.kron(Vsita_dlt, inverse(D2pA0)).T)
# sample from the multivariate normal distribution
Hdlt = np.ndarray.flatten(
    hbm.randn_multivariate(rtld, np.matrix(sig), n=nvar))
##-----------------
## lmbd-side computation ----
# compute the parameters of the multivariate normal distribution
Hhat_lmbd = np.dot(np.dot(inverse(D2), D.T), Sita_lmbd)
Dtld = np.dot(
    inverse(D2pA0),
    (np.dot(D2, Hhat_lmbd) + np.dot(A0, np.ndarray.flatten(m0))))
def NMkernel(sita, H, D, Vsita):
    res = sita - np.dot(H.T, D)
    return np.dot(np.dot(res.T, inverse(Vsita)), res)
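# NMkernel above evaluates a quadratic form res' inv(Vsita) res, i.e. a
# Mahalanobis-type distance of sita from H.T @ D.  A minimal modern-numpy sketch
# with made-up values; np.linalg.inv stands in for the older inverse helper:
import numpy as np

sita = np.array([1.0, 2.0])
H = np.array([[0.5, 0.1], [0.2, 0.4]])
D = np.array([1.0, 1.0])
Vsita = np.array([[2.0, 0.3], [0.3, 1.5]])

res = sita - np.dot(H.T, D)
print(np.dot(np.dot(res.T, np.linalg.inv(Vsita)), res))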
pool.join()
# step3--calculate difference
Sita_sys = calculate_difference_m(Xs)
# step4--calculate_spending_habits_param_delta=Sita_dlt
Sita_dlt, rej_dlt = calculate_spending_habits_param_delta_m(
    (u, Xs, Sita_dlt, Hdlt, Vsita_dlt, Ztld, rej_dlt))
# step4--calculate_spending_habits_param_lambd=Sita_lmbd
Sita_lmbd, rej_lmbd = calculate_spending_habits_param_lambd_m(
    (u, Xs, Sita_lmbd, Hlmbd, Vsita_lmbd, Ztld, rej_lmbd))
### step5--------------------------------------
## dlt-side computation ----
# compute the parameters of the multivariate normal distribution
D2 = np.dot(D.T, D)
D2pA0 = D2 + A0
Hhat_dlt = np.ndarray.flatten(
    np.dot(np.dot(inverse(D2), D.T), Sita_dlt.T))
Dtld = np.dot(
    inverse(D2pA0),
    (np.dot(D2, Hhat_dlt) + np.dot(A0, np.ndarray.flatten(m0))))
rtld = np.ndarray.flatten(Dtld)
sig = np.array(np.kron(Vsita_dlt, inverse(D2pA0)).T)
# sample from the multivariate normal distribution
Hdlt = np.ndarray.flatten(
    hbm.randn_multivariate(rtld, np.matrix(sig), n=nvar))
##-----------------
## lmbd-side computation ----
# compute the parameters of the multivariate normal distribution
Hhat_lmbd = np.ndarray.flatten(
    np.dot(np.dot(inverse(D2), D.T), Sita_lmbd.T))
Dtld = np.dot(
    inverse(D2pA0),
    alpha = min(1, new_Lsita_lmbd / old_Lsita_lmbd)
    if alpha == None:
        alpha = -1
    uni = ss.uniform.rvs(loc=0, scale=1, size=1)
    if uni < alpha:
        Sita_lmbd[hh] = new_sita_lmbd
    else:
        rej_lmbd[hh] = rej_lmbd[hh] + 1
# -------------------------------------------- #
### step5--------------------------------------
## dlt-side computation ----
# compute the parameters of the multivariate normal distribution
D2 = np.dot(D.T, D)
D2pA0 = D2 + A0
Hhat_dlt = np.dot(np.dot(inverse(D2), D.T), Sita_dlt)
Dtld = np.dot(inverse(D2pA0),
              (np.dot(D2, Hhat_dlt) + np.dot(A0, np.ndarray.flatten(m0))))
rtld = np.ndarray.flatten(Dtld)
sig = np.array([D2pA0 * Vsita_dlt[i] for i in range(Vsita_dlt.shape[0])])
# sample from the multivariate normal distribution
Hdlt = np.ndarray.flatten(hbm.randn_multivariate(rtld, np.matrix(sig), n=nvar))
##-----------------
## lmbd-side computation ----
# compute the parameters of the multivariate normal distribution
Hhat_lmbd = np.dot(np.dot(inverse(D2), D.T), Sita_lmbd)
Dtld = np.dot(inverse(D2pA0),
              (np.dot(D2, Hhat_lmbd) + np.dot(A0, np.ndarray.flatten(m0))))
# unpack Dtld into a vector
Dtld_ary = np.array(Dtld)  # convert to array, otherwise the elements cannot be manipulated
rtld = np.ndarray.flatten(Dtld_ary)
sig = np.array([[D2pA0] * Vsita_lmbd[i] for i in range(Vsita_lmbd.shape[0])])
# sample from the multivariate normal distribution
Sita_dlt = tmp[:,0]
rej_dlt += tmp[:,1]
# step4--calculate_spending_habits_param_lambd=Sita_lmbd
pool = Pool(processes=pr)
tmp = np.array(
    pool.map(calculate_spending_habits_param_lambd,
             ((hh, u, Xs, Sita_lmbd, Hlmbd, Vsita_lmbd, Ztld) for hh in xrange(nhh))) )
pool.close()
pool.join()
Sita_lmbd = tmp[:,0]
rej_lmbd += tmp[:,1]
### step5--------------------------------------
## dlt-side computation ----
# compute the parameters of the multivariate normal distribution
D2 = np.dot(D.T, D)
D2pA0 = D2 + A0
Hhat_dlt = np.dot(np.dot(inverse(D2), D.T), Sita_dlt)
Dtld = np.dot( inverse(D2pA0),
               (np.dot(D2, Hhat_dlt) + np.dot(A0, np.ndarray.flatten(m0))) )
rtld = np.ndarray.flatten(Dtld)
sig = np.array( np.kron(Vsita_dlt, inverse(D2pA0)).T )
# sample from the multivariate normal distribution
Hdlt = np.ndarray.flatten( hbm.randn_multivariate(rtld, np.matrix(sig), n=nvar) )
##-----------------
## lmbd-side computation ----
# compute the parameters of the multivariate normal distribution
Hhat_lmbd = np.dot( np.dot(inverse(D2), D.T), Sita_lmbd)
Dtld = np.dot( inverse(D2pA0),
               (np.dot(D2, Hhat_lmbd) + np.dot(A0, np.ndarray.flatten(m0))) )
rtld = np.ndarray.flatten(Dtld)
sig = np.array( np.kron(Vsita_lmbd, inverse(D2pA0)).T )
# sample from the multivariate normal distribution
Hlmbd = np.ndarray.flatten( hbm.randn_multivariate(rtld, np.matrix(sig), n=nvar) )
##-----------------
def RestreinedLeastSquaresFit(model0, parameters0, data0, maxiter, constrains0,
                              weightflag, model_deriv=None, deltachi=0.01,
                              fulloutput=0, xdata=None, ydata=None, sigmadata=None):
    # get the codes:
    # 0 = Free   1 = Positive   2 = Quoted
    # 3 = Fixed  4 = Factor     5 = Delta
    # 6 = Sum    7 = ignored
    constrains = [[], [], []]
    for i in range(len(parameters0)):
        constrains[0].append(constrains0[0][i])
        constrains[1].append(constrains0[1][i])
        constrains[2].append(constrains0[2][i])
    for i in range(len(parameters0)):
        if type(constrains[0][i]) == type('string'):
            # get the number
            if constrains[0][i] == "FREE":
                constrains[0][i] = CFREE
            elif constrains[0][i] == "POSITIVE":
                constrains[0][i] = CPOSITIVE
            elif constrains[0][i] == "QUOTED":
                constrains[0][i] = CQUOTED
            elif constrains[0][i] == "FIXED":
                constrains[0][i] = CFIXED
            elif constrains[0][i] == "FACTOR":
                constrains[0][i] = CFACTOR
                constrains[1][i] = int(constrains[1][i])
            elif constrains[0][i] == "DELTA":
                constrains[0][i] = CDELTA
                constrains[1][i] = int(constrains[1][i])
            elif constrains[0][i] == "SUM":
                constrains[0][i] = CSUM
                constrains[1][i] = int(constrains[1][i])
            elif constrains[0][i] == "IGNORED":
                constrains[0][i] = CIGNORED
            elif constrains[0][i] == "IGNORE":
                constrains[0][i] = CIGNORED
            else:
                # I should raise an exception
                #constrains[0][i] = 0
                raise ValueError, "Unknown constraint %s" % constrains[0][i]
    # make a local copy of the function for an easy speed up ...
    model = model0
    parameters = array(parameters0)
    if ONED:
        data = array(data0)
        x = data[1:2, 0]
    fittedpar = parameters.__copy__()
    flambda = 0.001
    iter = maxiter
    niter = 0
    if ONED:
        selfx = data[:, 0]
        selfy = data[:, 1]
    else:
        if data0 is not None:
            selfx = array(map(lambda x: x[0], data0))
            selfy = array(map(lambda x: x[1], data0))
        else:
            selfx = xdata
            selfy = ydata
    selfweight = ones(selfy.shape, Float)
    if ONED:
        nr0, nc = data.shape
    else:
        nr0 = len(selfy)
        if data0 is not None:
            nc = len(data0[0])
        else:
            if sigmadata is None:
                nc = 2
            else:
                nc = 3
    if weightflag == 1:
        if nc == 3:
            #dummy = abs(data[0:nr0:inc,2])
            if ONED:
                dummy = abs(data[:, 2])
            else:
                if data0 is not None:
                    dummy = abs(array(map(lambda x: x[2], data0)))
                else:
                    dummy = abs(array(sigmadata))
            selfweight = 1.0 / (dummy + equal(dummy, 0))
            selfweight = selfweight * selfweight
        else:
            selfweight = 1.0 / (abs(selfy) + equal(abs(selfy), 0))
    n_param = len(parameters)
    selfalphazeros = zeros((n_param, n_param), Float)
    selfbetazeros = zeros((1, n_param), Float)
    index = arange(0, nr0, 1)
    while (iter > 0):
        niter = niter + 1
        if (niter < 2) and (n_param * 3 < nr0):
            x = take(selfx, index)
            y = take(selfy, index)
            weight = take(selfweight, index)
        else:
            x = selfx
            y = selfy
            weight = selfweight
        chisq0, alpha0, beta, \
            n_free, free_index, noigno, fitparam, derivfactor = ChisqAlphaBeta(
                model, fittedpar, x, y, weight, constrains,
                model_deriv=model_deriv)
        print "B ", chisq0
        nr, nc = alpha0.shape
        flag = 0
        lastdeltachi = chisq0
        while flag == 0:
            newpar = parameters.__copy__()
            if (1):
                alpha = alpha0 + flambda * identity(nr) * alpha0
                deltapar = dot(beta, inverse(alpha))
            else:
                # an attempt to increase accuracy (it was unsuccessful)
                alphadiag = sqrt(diagonal(alpha0))
                npar = len(sqrt(diagonal(alpha0)))
                narray = zeros((npar, npar), Float)
                for i in range(npar):
                    for j in range(npar):
                        narray[i, j] = alpha0[i, j] / (alphadiag[i] * alphadiag[j])
                narray = inverse(narray + flambda * identity(nr))
                for i in range(npar):
                    for j in range(npar):
                        narray[i, j] = narray[i, j] / (alphadiag[i] * alphadiag[j])
                deltapar = dot(beta, narray)
            pwork = zeros(deltapar.shape, Float)
            for i in range(n_free):
                if constrains[0][free_index[i]] == CFREE:
                    pwork[0][i] = fitparam[i] + deltapar[0][i]
                elif constrains[0][free_index[i]] == CPOSITIVE:
                    # abs method
                    pwork[0][i] = fitparam[i] + deltapar[0][i]
                    # square method
                    #pwork [0] [i] = (sqrt(fitparam [i]) + deltapar [0] [i]) * \
                    #                (sqrt(fitparam [i]) + deltapar [0] [i])
                elif constrains[0][free_index[i]] == CQUOTED:
                    pmax = max(constrains[1][free_index[i]], constrains[2][free_index[i]])
                    pmin = min(constrains[1][free_index[i]], constrains[2][free_index[i]])
                    A = 0.5 * (pmax + pmin)
                    B = 0.5 * (pmax - pmin)
                    if (B != 0):
                        pwork[0][i] = A + \
                            B * sin(arcsin((fitparam[i] - A) / B) + deltapar[0][i])
                    else:
                        print "Error processing constrained fit"
                        print "Parameter limits are", pmin, ' and ', pmax
                        print "A = ", A, "B = ", B
                newpar[free_index[i]] = pwork[0][i]
            newpar = array(getparameters(newpar, constrains))
            workpar = take(newpar, noigno)
            #yfit = model(workpar.tolist(), x)
            yfit = model(workpar, x)
            chisq = sum(weight * (y - yfit) * (y - yfit))
            print "chisq ", chisq, "chisq0 ", chisq0
            if chisq > chisq0:
                flambda = flambda * 10.0
                if flambda > 1000:
                    flag = 1
                    iter = 0
            else:
                flag = 1
                fittedpar = newpar.__copy__()
                lastdeltachi = (chisq0 - chisq) / (chisq0 + (chisq0 == 0))
                if (lastdeltachi) < deltachi:
                    pass  # iter = 0
                chisq0 = chisq
                flambda = flambda / 10.0
        print "iter = ", iter, "chisq = ", chisq
        iter = iter - 1
    sigma0 = sqrt(abs(diagonal(inverse(alpha0))))
    sigmapar = getsigmaparameters(fittedpar, sigma0, constrains)
    if not fulloutput:
        return fittedpar.tolist(), chisq / (len(yfit) - len(sigma0)), sigmapar.tolist()
    else:
        return fittedpar.tolist(), chisq / (len(yfit) - len(sigma0)), \
               sigmapar.tolist(), niter, lastdeltachi