def __init__(self):
    self._processCovariance = sp.identity(13)
    self._measurementCovariance = sp.identity(3)
    self._errorCovariance = sp.identity(13)
    self._translationRotorFromBody = vec3(0)
    self._gravity = vec3([0.0, 0.0, 1.0])
    # the magnetic field vector points to the magnetic north, with a
    # -70 degrees (-1.22 radians) pitch (towards the ground)
    self._magneticField = mat3.fromEulerXYZ(0.0, -1.22, 0.0) * vec3([1.0, 0.0, 0.0])
    self._declinationAngle = 0.0
    self.timestamp = 0.0
    self.positionNED = vec3(0)
    self.velocityBody = vec3(0)
    self.orientationBody = quat(1)
    self.gyroBiases = vec3(0)
    self.gravityBody = self._gravity
def LUP(A):
    # throw warning flag when the pivot is too small (close to 0)
    ok = 1
    small = 1e-12
    n = scipy.shape(A)[0]
    U = copy.copy(A)
    L = scipy.identity(n)
    P = scipy.identity(n)
    for j in range(1, n):
        s = scipy.argmax(abs(U[j - 1:n, j - 1])) + j - 1  # argmax returns the index of that number
        if s != j - 1:
            U = swap(U, s, j - 1, n)
            P = swap(P, s, j - 1, n)
            if j > 1:
                L = swap(L, s, j - 1, j - 1)
        # Since the multiplier is zero after the second row after the pivot
        # row, we only need to calculate one row for U and L, which is the
        # row under the pivot row
        i = j + 1
        if abs(U[j - 1, j - 1]) < small:
            print("Near-zero pivot!")
            ok = 0
            break
        L[i - 1, j - 1] = float(U[i - 1, j - 1]) / U[j - 1, j - 1]
        for k in range(j, n + 1):
            U[i - 1, k - 1] = U[i - 1, k - 1] - L[i - 1, j - 1] * U[j - 1, k - 1]
    return L, U, P, ok
def LUP(A):
    small = 1e-12  # a pivot smaller than this will raise the error flag "ok=0"
    n = scipy.shape(A)[0]  # extract matrix size
    U = copy(A)  # copy content of A (avoid linking U and A)
    L = scipy.identity(n)  # initialize L and P
    P = scipy.identity(n)
    par = 1  # initial permutation (identity) is even
    ok = 1  # by default, we assume the matrix is non-singular
    for k in range(1, n):
        s = scipy.argmax(abs(U[k - 1:n, k - 1])) + k - 1  # find pivot element
        if abs(U[s, k - 1]) < small:  # check if pivot is too close to zero
            print("(nearly) singular matrix, pivot smaller than %e" % small)
            ok = 0
            break  # matrix is too close to singular, exit with error flag up
        if s != k - 1:  # if the pivot is not on the diagonal...
            par = -par  # change parity
            U = swap(U, s, k - 1, n)  # swap rows of U
            if k > 1:  # swap rows of L left of diagonal element
                L = swap(L, s, k - 1, k - 1)
            P = swap(P, s, k - 1, n)  # swap rows of P
        for j in range(k + 1, n + 1):  # Gauss elimination of rows below pivot
            L[j - 1, k - 1] = U[j - 1, k - 1] / U[k - 1, k - 1]
            for i in range(k, n + 1):
                U[j - 1, i - 1] = U[j - 1, i - 1] - L[j - 1, k - 1] * U[k - 1, i - 1]
    print('L=', L)
    print(" ")
    print('U=', U)
    print(" ")
    print('P=', P)
    print(" ")
    return L, U, P, par, ok
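# A minimal usage sketch for the LUP routine above. It assumes the
# row-swapping helper `swap` that LUP relies on is defined elsewhere in this
# module. For a well-conditioned A, the factors should satisfy P A = L U up
# to rounding.
import numpy as np

A_demo = np.array([[0.0, 2.0, 1.0],
                   [1.0, 1.0, 0.0],
                   [2.0, 0.0, 3.0]])
L_demo, U_demo, P_demo, par_demo, ok_demo = LUP(A_demo)
if ok_demo:
    print(np.abs(P_demo @ A_demo - L_demo @ U_demo).max())  # expect ~1e-16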
def test_scipy_inv(self):
    invA = inv(self.A)
    self.assertAlmostEqual(
        sum(sum(abs(dot(invA, self.A) - identity(2)))), 0.0, 5)
    self.assertAlmostEqual(
        sum(sum(abs(dot(self.A, invA) - identity(2)))), 0.0, 5)
def __init__(self, respond=None, regressors=None, intercept=False,
             D=None, d=None, G=None, a=None, b=None, **args):
    """Input: paras where they are expected to be tuple or dictionary"""
    ECRegression.__init__(self, respond, regressors, intercept, D, d, **args)
    if self.intercept and G is not None:
        self.G = scipy.zeros((self.n, self.n))
        self.G[1:, 1:] = G
    elif self.intercept and G is None:
        self.G = scipy.identity(self.n)
        self.G[0, 0] = 0.0
    elif not self.intercept and G is not None:
        self.G = G
    else:
        self.G = scipy.identity(self.n)
    if self.intercept:
        self.a = scipy.zeros((self.n, 1))
        self.a[1:] = a
        self.b = scipy.zeros((self.n, 1))
        self.b[1:] = b
    else:
        if a is None:
            self.a = scipy.matrix(scipy.zeros((self.n, 1)))
        else:
            self.a = a
        if b is None:
            self.b = scipy.matrix(scipy.ones((self.n, 1)))
        else:
            self.b = b
def test_scipy_svd(self):
    U, D, Vt = svd(self.A)
    D = array([[D[0], 0], [0, D[1]]], 'd')
    self.assertAlmostEqual(
        sum(sum(abs(dot(U, U.transpose()) - identity(2)))), 0.0, 5)
    self.assertAlmostEqual(
        sum(sum(abs(dot(Vt, Vt.transpose()) - identity(2)))), 0.0, 5)
    self.assertAlmostEqual(
        sum(sum(abs(dot(U, dot(D, Vt)) - self.A))), 0.0, 5)
def find_object_frame_and_bounding_box(self, point_cloud):
    #leaving point cloud in the cluster frame
    cluster_frame = point_cloud.header.frame_id
    self.base_frame = cluster_frame
    (points, cluster_to_base_frame) = transform_point_cloud(
        self.tf_listener, point_cloud, self.base_frame)

    #run PCA on all 3 dimensions
    (shifted_points, xyz_mean) = self.mean_shift_xyz(points)
    directions = self.pca(shifted_points[0:3, :])

    #convert the points to object frame:
    #rotate all the points to be in the frame of the eigenvectors
    #(should already be centered around xyz_mean)
    rotmat = scipy.matrix(scipy.identity(4))
    rotmat[0:3, 0:3] = directions
    object_points = rotmat**-1 * shifted_points

    #remove outliers from the cluster
    #object_points = self.remove_outliers(object_points)

    #find the object bounding box in the new object frame as
    #[[xmin, ymin, zmin], [xmax, ymax, zmax]] (coordinates of opposite corners)
    object_bounding_box = [[0] * 3 for i in range(2)]
    object_bounding_box_dims = [0] * 3
    for dim in range(3):
        object_bounding_box[0][dim] = object_points[dim, :].min()
        object_bounding_box[1][dim] = object_points[dim, :].max()
        object_bounding_box_dims[dim] = object_bounding_box[1][dim] - object_bounding_box[0][dim]

    #now shift the object frame and bounding box so that the center is the center of the object
    offset_mat = scipy.mat(scipy.identity(4))
    for i in range(3):
        offset = object_bounding_box[1][i] - object_bounding_box_dims[i] / 2.  #center
        object_bounding_box[0][i] -= offset  #mins
        object_bounding_box[1][i] -= offset  #maxes
        object_points[i, :] -= offset
        offset_mat[i, 3] = offset
    rotmat = rotmat * offset_mat

    #record the transforms from object frame to base frame and to the original cluster frame,
    #broadcast the object frame to tf, and draw the object frame in rviz
    unshift_mean = scipy.identity(4)
    for i in range(3):
        unshift_mean[i, 3] = xyz_mean[i]
    object_to_base_frame = unshift_mean * rotmat
    object_to_cluster_frame = cluster_to_base_frame**-1 * object_to_base_frame

    #broadcast the object frame to tf
    (object_frame_pos, object_frame_quat) = mat_to_pos_and_quat(object_to_cluster_frame)
    self.tf_broadcaster.sendTransform(object_frame_pos, object_frame_quat,
                                      rospy.Time.now(), "object_frame", cluster_frame)

    return (object_points, object_bounding_box_dims, object_bounding_box,
            object_to_base_frame, object_to_cluster_frame)
def computeReductions(self, qlist, wlist):
    deltalist = qlist / (1. + self.weights)
    deltalist_1add = qlist / (2. + self.weights)
    reductionlist = deltalist - deltalist_1add
    fractionlist = reductionlist / reductionlist.sum()
    recclist = matrix(identity(self.dim)[argmax(fractionlist), 0:], "int")

    deltalist_1add = arr2lst(deltalist_1add)
    reductionlist = arr2lst(reductionlist)
    fractionlist = arr2lst(fractionlist)
    recclist = arr2lst(recclist)

    # These are the recommendations for only one new sample
    currentEvalVariance = [deltalist_1add, reductionlist, fractionlist, recclist]

    # If you want more samples, add four more entries to this list, with this info
    if self.nNewSamples > 1:
        deltalist_madd = qlist / (1. + self.nNewSamples + self.weights)
        reductionlist_m = deltalist - deltalist_madd
        self.stateToSampleMore = argmax(reductionlist_m)
        fractionlist_m = reductionlist_m / reductionlist_m.sum()
        if self.recommendationScheme == 'Nina':
            # Nina's scheme: put all nNewSamples at one state
            recclist_m = matrix(
                self.nNewSamples * identity(self.dim)[argmax(fractionlist_m), 0:], "int")
        elif self.recommendationScheme == 'VAV':
            # VAV: a new possible scheme -- pick several to sample, but make
            # sure the total counts are self.nNewSamples
            recclist_m = matrix(self.nNewSamples * fractionlist_m, "int")
            while recclist_m[0, :].sum() < self.nNewSamples:
                recclist_m[0, argmax(fractionlist_m)] += 1
        deltalist_madd = arr2lst(deltalist_madd)
        reductionlist_m = arr2lst(reductionlist_m)
        fractionlist_m = arr2lst(fractionlist_m)
        recclist_m = arr2lst(recclist_m)
        currentEvalVariance.extend(
            [deltalist_madd, reductionlist_m, fractionlist_m, recclist_m])

    # add two more things to the beginning of the list: deltalist, and 0....n-1
    currentEvalVariance.insert(0, arr2lst(deltalist))
    currentEvalVariance.insert(0, range(self.dim))  # 0 .... n-1 will be the first thing

    # Now, transpose the whole thing so that this information is in column format
    currentEvalVariance = matrix(currentEvalVariance, "float64").T
    self.varianceContributions.append(currentEvalVariance)
def householder(A, reduced=False) -> Tuple[sp.matrix, sp.matrix]:
    '''
    Given a matrix A, computes its QR factorisation using Householder
    reflections.
    Returns (Q, R) such that A = QR, Q is orthogonal and R is triangular.
    '''
    m, n = A.shape
    A_full = sp.ndarray(A.shape)
    A_sub = A.copy()
    Q_full = sp.identity(m)
    # iterate over smaller dimension of A
    for i in range(min(A.shape)):
        # leftmost vector of A submatrix
        v = A_sub[:, 0]
        # vector with 1 in the first position.
        e_i = sp.zeros(v.shape[0])
        e_i[0] = 1
        # compute householder vector for P
        u = v + sign(v.item(0)) * spla.norm(v) * e_i
        # normalise
        u = u / spla.norm(u)
        # compute submatrix _P
        _P = sp.identity(v.shape[0]) - 2 * sp.outer(u, u)
        # embed this submatrix _P into the full size P
        P = spla.block_diag(sp.identity(i), _P)
        # compute next iteration of Q
        Q_full = P @ Q_full
        # compute next iteration of R
        A_sub = _P @ A_sub
        # copy first rows/cols to A_full
        A_full[i, i:] = A_sub[0, :]
        A_full[i:, i] = A_sub[:, 0]
        # iterate into submatrix
        A_sub = A_sub[1:, 1:]
    # Q_full is currently the inverse because it is applied to A.
    # thus, Q = Q_full^T.
    Q_full = Q_full.T
    if reduced:
        Q_full = Q_full[:, :n]
        A_full = A_full[:n, :]
    # A = QR
    # note that A has been reduced to R by applying the P's.
    return (Q_full, A_full)
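# A hedged usage sketch for the Householder QR above. It assumes this
# module's imports are sp = scipy/numpy, spla = scipy.linalg and
# sign = numpy.sign, which is how the body reads. It checks the
# orthogonality of Q and the reconstruction residual.
import numpy as np

A_demo = np.array([[4.0, 1.0], [2.0, 3.0], [0.0, 5.0]])
Q_demo, R_demo = householder(A_demo)
print(np.abs(Q_demo.T @ Q_demo - np.identity(3)).max())  # expect ~1e-16
print(np.abs(Q_demo @ R_demo - A_demo).max())            # expect ~1e-16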
def Calculate_Beta(X, Pi):
    k = Pi.shape[1]
    N = X.shape[0]
    ones = sc.ones((k, k))
    Y = sc.ones((N, N)) - sc.identity(N) - X
    Z = sc.ones((k, k)) - 0.5 * sc.identity(k)
    beta1 = ones + Z * Pi.T.dot(X.dot(Pi))
    beta2 = ones + Z * Pi.T.dot(Y.dot(Pi))
    return beta1, beta2
def IsingHamiltonian_old(n, h, J, g):
    ### Construct Hamiltonian ###
    Z = sp.matrix([[1, 0], [0, -1]])
    X = sp.matrix([[0, 1], [1, 0]])
    I = sp.identity(2)
    alpha = sp.zeros((2**n, 2**n))
    beta = sp.zeros((2**n, 2**n))
    delta = sp.zeros((2**n, 2**n))
    matrices = []

    # Calculate alpha
    for i in range(0, n):
        for m in range(0, n - 1):
            matrices.append(I)
        matrices.insert(i, Z)
        temp = matrices[0]
        matrices.pop(0)
        while (len(matrices) != 0):
            temp = sp.kron(temp, matrices[0])
            matrices.pop(0)
        alpha = alpha + temp * h[i]
    temp = 0

    # Calculate beta
    for i in range(0, n):
        for j in range(0, n):
            if (i != j):
                for m in range(0, n - 2):
                    matrices.append(I)
                matrices.insert(i, Z)
                matrices.insert(j, Z)
                temp = matrices[0]
                matrices.pop(0)
                while (len(matrices) != 0):
                    temp = sp.kron(temp, matrices[0])
                    matrices.pop(0)
                beta = beta + temp * J[i, j]
    beta = beta + g * sp.identity(2**n)
    temp = 0

    # Calculate delta
    for i in range(0, n):
        for m in range(0, n - 1):
            matrices.append(I)
        matrices.insert(i, X)
        temp = matrices[0]
        matrices.pop(0)
        while (len(matrices) != 0):
            temp = sp.kron(temp, matrices[0])
            matrices.pop(0)
        delta += temp

    return [alpha, beta, delta]
def pleiopred_inf(beta_hats1, beta_hats2, pr_sig1, pr_sig2, rho=0,
                  n1=1000, n2=1000, ref_ld_mats1=None, ref_ld_mats2=None,
                  ld_window_size=100):
    num_betas = len(beta_hats1)
    post_betas1 = sp.empty(num_betas)
    post_betas2 = sp.empty(num_betas)
    m = len(beta_hats1)
    for i, wi in enumerate(range(0, num_betas, ld_window_size)):
        start_i = wi
        stop_i = min(num_betas, wi + ld_window_size)
        curr_window_size = stop_i - start_i
        bhats = beta_hats1[start_i:stop_i]
        ghats = beta_hats2[start_i:stop_i]
        S11 = sp.diag(pr_sig1[start_i:stop_i])
        S12 = sp.diag(rho * np.sqrt(pr_sig1[start_i:stop_i] * pr_sig2[start_i:stop_i]))
        S22 = sp.diag(pr_sig2[start_i:stop_i])
        D1 = ref_ld_mats1[i]
        D2 = ref_ld_mats2[i]
        S = np.concatenate(
            (np.concatenate((S11, S12), axis=1),
             np.concatenate((S12, S22), axis=1)), axis=0)
        SD = np.concatenate(
            (np.concatenate((n1 * np.dot(S11, D1), n2 * np.dot(S12, D2)), axis=1),
             np.concatenate((n1 * np.dot(S12, D1), n2 * np.dot(S22, D2)), axis=1)),
            axis=0)
        A = sp.identity(2 * curr_window_size) + SD
        A_inv = linalg.pinv(A)
        W = sp.identity(2 * curr_window_size) - sp.dot(SD, A_inv)
        Sbeta_hats = sp.concatenate(
            (n1 * sp.dot(S11, bhats) + n2 * sp.dot(S12, ghats),
             n1 * sp.dot(S12, bhats) + n2 * sp.dot(S22, ghats)), axis=0)
        post_both = sp.dot(W, Sbeta_hats)
        ll = len(post_both)
        # integer split of the stacked vector (// keeps the indices ints)
        post_betas1[start_i:stop_i] = post_both[0:ll // 2]
        post_betas2[start_i:stop_i] = post_both[ll // 2:ll]
    updated_betas = {'D1': post_betas1, 'D2': post_betas2}
    return updated_betas
def measure(self, magneto):
    measurementJacobian = sp.zeros((3, 13))
    magnetoJacobian = scipy_utils.d_inverse_rotation_d_quaternion_at(
        cgkit_to_scipy.convert(self.orientationBody),
        cgkit_to_scipy.convert(magneto))
    scipy_utils.load_submatrix(measurementJacobian, (0, 6), magnetoJacobian)
    kalmanGain = sp.dot(
        sp.dot(self._errorCovariance, measurementJacobian.transpose()),
        linalg.inv(
            sp.dot(measurementJacobian,
                   sp.dot(self._errorCovariance, measurementJacobian.transpose()))
            + self._measurementCovariance))
    stateCorrection = sp.dot(
        kalmanGain,
        (cgkit_to_scipy.convert(magneto - self._predictMagnetoBody())))
    self.orientationBody = self.orientationBody + quat(list(stateCorrection[6:10]))
    self._errorCovariance = sp.dot(
        (sp.identity(13) - sp.dot(kalmanGain, measurementJacobian)),
        self._errorCovariance)
def embedTraversal(cloned, obj, n, suffix):
    for i in range(len(obj)):
        if isinstance(obj[i], Model):
            cloned.body += [obj[i]]
        elif (isinstance(obj[i], tuple) or isinstance(obj[i], list)) and len(obj[i]) == 2:
            V, EV = obj[i]
            V = [v + n * [0.0] for v in V]
            cloned.body += [(V, EV)]
        elif (isinstance(obj[i], tuple) or isinstance(obj[i], list)) and len(obj[i]) == 3:
            V, FV, EV = obj[i]
            V = [v + n * [0.0] for v in V]
            cloned.body += [(V, FV, EV)]
        elif isinstance(obj[i], Mat):
            mat = obj[i]
            d, d = mat.shape
            newMat = scipy.identity(d + n * 1)
            for h in range(d - 1):
                for k in range(d - 1):
                    newMat[h, k] = mat[h, k]
                newMat[h, d - 1 + n * 1] = mat[h, d - 1]
            cloned.body += [newMat.view(Mat)]
        elif isinstance(obj[i], Struct):
            newObj = Struct()
            newObj.box = hstack((obj[i].box, [n * [0], n * [0]]))
            newObj.name = obj[i].name + suffix
            newObj.category = obj[i].category
            cloned.body += [embedTraversal(newObj, obj[i], n, suffix)]
    return cloned
def solve(self, rhs):
    """
    Overrides LinearSolver.solve
    Result contains (solution, status)
    status is always 0, indicating that the method has converged
    """
    if not self.built:
        N = len(self.point.getState()[0])
        Dt = self.point.system.Dt
        dt = self.point.system.dt
        k = int(Dt / dt)
        I = scipy.identity(N)
        A = self.point.computeJacobian()
        B = I + dt * A[:N, :N]
        AA = B
        for i in range(k - 1):
            AA = scipy.dot(AA, B)
        Matrix = A  # this way the extra rows and columns stay the same as in A
        Matrix[:N, :N] = I - AA
        self.Matrix = Matrix
        self.built = True
    else:
        Matrix = self.Matrix
    x = scipy.linalg.solve(Matrix, rhs)
    status = 0
    return (x, status)
def test_correlated_scatter(self):
    n = 50
    r = (sp.arange(n, dtype=float) + 10.0 * n) / 10.0 * n
    data = sp.sin(sp.arange(n)) * r
    amp = 25.0
    theory = data / amp
    # Generate correlated matrix.
    C = random.rand(n, n)  # [0, 1)
    # Raise to high power to make values near 1 rare.
    C = (C**10) * 0.2
    C = (C + C.T) / 2.0
    C += sp.identity(n)
    C *= r[:, None] / 2.0
    C *= r[None, :] / 2.0
    # Generate random numbers in diagonal frame.
    h, R = linalg.eigh(C)
    self.assertTrue(sp.alltrue(h > 0))
    rand_vals = random.normal(size=n) * sp.sqrt(h)
    # Rotate back.
    data += sp.dot(R.T, rand_vals)
    out = utils.ampfit(data, C, theory)
    a, s = out['amp'], out['error']
    self.assertTrue(sp.allclose(a, amp, atol=5.0 * s, rtol=0))
    # Expect the next line to fail 1/100 trials.
    self.assertFalse(sp.allclose(a, amp, atol=0.01 * s, rtol=0))
def test_uncorrelated_noscatter(self):
    data = sp.arange(10, dtype=float)
    theory = data / 2.0
    C = sp.identity(10)
    out = utils.ampfit(data, C, theory)
    a, s = out['amp'], out['error']
    self.assertAlmostEqual(a, 2)
def inv_rot_3D(x, y, z, rot="z", angles=[ma.pi]):
    """ Rotates a set of vectors by any number of Euler angles, in the
    reverse direction.  Input is as rot_3D, using the inverse order for
    rotations and angles, but leaving the angles un-flipped - the matrix
    inverses will take care of that.
    rot is a string of x's, y's and z's describing the order of rotations,
    left-to-right, while angles is the corresponding angle (in radians)
    for each rotation"""
    # Build rotation matrix
    R = sc.identity(3)
    for i in range(len(angles)):
        t = angles[i]
        if rot[i] == "x":
            Rx = LA.inv(Rot_x(t))
            R = Rx * R
            continue
        elif rot[i] == "y":
            Ry = LA.inv(Rot_y(t))
            R = Ry * R
            continue
        elif rot[i] == "z":
            Rz = LA.inv(Rot_z(t))
            R = Rz * R
            continue
        else:
            print "!!! - Invalid rotation axis, {0}".format(rot[i])
    # Now do the rotations
    if type(x) != type(sc.zeros(1)):
        out = R * sc.matrix([[x], [y], [z]])
        x, y, z = float(out[0]), float(out[1]), float(out[2])
    else:
        for i in range(len(x)):
            out = R * sc.matrix([[x[i]], [y[i]], [z[i]]])
            x[i], y[i], z[i] = float(out[0]), float(out[1]), float(out[2])
    return x, y, z


""" MAYBE MAKE THIS ONE TAKE SAME INPUTS AS rot_3D TO MAKE THINGS EASIER """
def evalfREML(logDelta, MCtrials, X, Y, beta_rand, e_rand_unscaled):
    (N, M) = X.shape
    delta = sp.exp(logDelta)
    y_rand = sp.empty((N, MCtrials))
    H_inv_y_rand = sp.empty((N, MCtrials))
    beta_hat_rand = sp.empty((M, MCtrials))
    e_hat_rand = sp.empty((N, MCtrials))
    ## Calculating the matrix H = X %*% t(X)/M + delta*I_N
    H = sp.dot(X, X.T) / M + delta * sp.identity(N)
    x0 = sp.zeros(N)
    for t in range(0, MCtrials):
        ## build random phenotypes using pre-generated components
        y_rand[:, t] = sp.dot(X, beta_rand[:, t]) + sp.sqrt(delta) * e_rand_unscaled[:, t]
        ## compute H^(-1)%*%y.rand[,t] by the aid of conjugate gradient iteration
        H_inv_y_rand[:, t] = conjugateGradientSolve(A=H, x0=x0, b=y_rand[:, t])
        ## compute BLUP estimated SNP effect sizes and residuals
        ## (1.0/M keeps the division in floating point)
        beta_hat_rand[:, t] = 1.0 / M * sp.dot(X.T, H_inv_y_rand[:, t])
        e_hat_rand[:, t] = delta * H_inv_y_rand[:, t]
    ## compute BLUP estimated SNP effect sizes and residuals for real phenotypes
    H_inv_y_data = conjugateGradientSolve(A=H, x0=x0, b=Y)
    beta_hat_data = 1.0 / M * sp.dot(X.T, H_inv_y_data)
    e_hat_data = delta * H_inv_y_data
    ## evaluate f_REML
    f = sp.log((sp.sum(beta_hat_data**2) / sp.sum(e_hat_data**2)) /
               (sp.sum(beta_hat_rand**2) / sp.sum(e_hat_rand**2)))
    return f
def update(self, measuredState):
    # Compute the residual between measurement and prediction
    self.prefitResidual = measuredState - dot([self.H, self.predictedState])

    # Compute the Kalman gain
    intermediate = sp.linalg.inv(self.R + quadratic_form(self.H, self.predictedP))
    self.Kt = dot([self.predictedP, self.H.T, intermediate])

    # Update the state
    self.state = self.predictedState + dot([self.Kt, self.prefitResidual])

    # Update covariance matrix
    self.P = quadratic_form(
        sp.identity(self.Kt.shape[0]) - dot([self.Kt, self.H]), self.predictedP) \
        + quadratic_form(self.Kt, self.R)

    # Compute the postfit residual to see how well we are doing
    self.postfitResidual = measuredState - dot([self.H, self.state])

    # Store the results
    self.append_data()
    self.data.prx.append(self.prefitResidual[0])
    self.data.pry.append(self.prefitResidual[1])
def AlphaBetaCoeffs_old(n, a, b):
    " Construct the alpha and beta coefficient matrices. "
    Z = sp.matrix([[1, 0], [0, -1]])
    I = sp.identity(2)
    alpha = sp.zeros((2**n, 2**n))
    beta = sp.zeros((2**n, 2**n))
    m1 = []
    m2 = []
    for i in range(0, n):
        for m in range(0, n - 1):
            m1.append(I)
        m1.insert(i, Z)
        temp1 = m1[0]
        m1.pop(0)
        while (len(m1) != 0):
            temp1 = sp.kron(temp1, m1[0])
            m1.pop(0)
        alpha += temp1 * a[i]
        for j in range(i + 1, n):
            for m in range(0, n - 2):
                m2.append(I)
            m2.insert(i, Z)
            m2.insert(j, Z)
            temp2 = m2[0]
            m2.pop(0)
            while (len(m2) != 0):
                temp2 = sp.kron(temp2, m2[0])
                m2.pop(0)
            beta += (temp2) * b[i, j]
    return [alpha, beta]
def computeProjectionVectors(self, P, L, U):
    eK = matrix(identity(self.dim, float64)[0:, (self.dim - 1)]).T
    U = matrix(U, float64)
    U[self.dim - 1, self.dim - 1] = 1.0
    # Sergio: I added this exception because in rare cases, the matrix
    # U is singular, which gives rise to a LinAlgError.
    try:
        x1 = matrix(solve(U, eK), float64)
    except LinAlgError:
        print "Matrix U was singular, so we input a fake x1\n"
        print "U: ", U
        x1 = matrix(ones(self.dim))
    #print "x1", x1
    del U
    LT = matrix(L, float64, copy=False).T
    PT = matrix(P, float64, copy=False).T
    x2 = matrix(solve(LT * PT, eK), float64)
    del L
    del P
    del LT
    del PT
    del eK
    return (x1, x2)
def rot_3D(x, y, z, rot="z", angles=[ma.pi]):
    """ Rotates a set of vectors by any number of Euler angles.
    rot is a string of x's, y's and z's describing the order of rotations,
    left-to-right, while angles is the corresponding angle (in radians)
    for each rotation"""
    # Build rotation matrix
    R = sc.identity(3)
    for i in range(len(angles)):
        t = angles[i]
        if rot[i] == "x":
            Rx = Rot_x(t)
            R = Rx * R
            continue
        elif rot[i] == "y":
            Ry = Rot_y(t)
            R = Ry * R
            continue
        elif rot[i] == "z":
            Rz = Rot_z(t)
            R = Rz * R
            continue
        else:
            print "!!! - Invalid rotation axis, {0}".format(rot[i])
    # Now do the rotations
    if type(x) != type(sc.zeros(1)):
        out = R * sc.matrix([[x], [y], [z]])
        x, y, z = float(out[0]), float(out[1]), float(out[2])
    else:
        for i in range(len(x)):
            out = R * sc.matrix([[x[i]], [y[i]], [z[i]]])
            x[i], y[i], z[i] = float(out[0]), float(out[1]), float(out[2])
    return x, y, z
def GP_covmat(X1, X2, par, typ='SE', sigma=None):
    '''
    Compute covariance matrix with or without white noise for a range of
    GP kernels. Currently implemented:
    - SE (squared exponential 1D, default)
    - SE_ARD (squared exponential with separate length scales for each
      input dimension)
    - M32 (Matern 32, 1D)
    - QP (quasi-periodic SE, 1D)
    '''
    # A single if/elif chain, so the QP branch is not overwritten by the
    # default SE branch below.
    if typ == 'QP':
        DD = ssp.distance.cdist(X1, X2, 'euclidean')
        K = par[0]**2 * \
            scipy.exp(- (scipy.sin(scipy.pi * DD / par[1]))**2 / 2. / par[2]**2
                      - DD**2 / 2. / par[3]**2)
    elif typ == 'Per':
        DD = ssp.distance.cdist(X1, X2, 'euclidean')
        K = par[0]**2 * \
            scipy.exp(- (scipy.sin(scipy.pi * DD / par[1]))**2 / 2. / par[2]**2)
    elif typ == 'M32':
        DD = ssp.distance.cdist(X1, X2, 'euclidean')
        arg = scipy.sqrt(3) * abs(DD) / par[1]
        K = par[0]**2 * (1 + arg) * scipy.exp(- arg)
    elif typ == 'SE_ARD':
        V = numpy.abs(numpy.matrix(numpy.diag(1. / numpy.sqrt(2) / par[1:])))
        D2 = ssp.distance.cdist(X1 * V, X2 * V, 'sqeuclidean')
        K = par[0]**2 * numpy.exp(-D2)
    else:  # 'SE (radial)'
        D2 = ssp.distance.cdist(X1, X2, 'sqeuclidean')
        K = par[0]**2 * scipy.exp(- D2 / 2. / par[1]**2)
    if sigma is not None:
        N = X1.shape[0]
        K += sigma**2 * scipy.identity(N)
    return scipy.matrix(K)
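# A hedged usage sketch for GP_covmat, assuming ssp = scipy.spatial as the
# cdist calls suggest: build a 5x5 squared-exponential covariance with white
# noise on 1D inputs.
import numpy

X_demo = numpy.linspace(0.0, 1.0, 5).reshape(-1, 1)
K_demo = GP_covmat(X_demo, X_demo, par=[1.0, 0.3], typ='SE', sigma=0.1)
# Diagonal entries should equal par[0]**2 + sigma**2 = 1.01.
print(K_demo[0, 0])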
def __init__(self, respond=None, regressors=None, intercept=False, **args):
    """
    :param respond: Dependent time series
    :type respond: TimeSeriesFrame<double>
    :param regressors: Independent time series
    :type regressors: TimeSeriesFrame<double>
    :param intercept: include/exclude intercept
    :type intercept: boolean
    :param args: reserved for future development
    """
    self.intercept = intercept
    self.respond = respond
    self.regressors = regressors
    self.weight = args.get("weight")
    self.t, self.n = regressors.size()
    if self.intercept:
        self.regressors.data = scipy.hstack(
            (scipy.ones((self.t, 1)), self.regressors.data))
        self.regressors.cheader.insert(0, "Intercept")
        self.n = self.n + 1
    if self.weight is None:
        self.weight = scipy.identity(self.t)
    self.X, self.y, self.W = map(
        scipy.matrix, (self.regressors.data, self.respond.data, self.weight))
def __init__(self, f, shape, gradient=None, hessian=None):
    """Solves an optimization problem, except that it doesn't

    Arguments:
    f        -- the function
    shape    -- Dimension of input argument to function
                (if f: ℝⁿ->ℝ then shape=n)
    gradient -- the gradient (default to numerical approximation)
    hessian  -- function returning hessian matrix (default to numerical
                approximation)
    """
    self.f = f
    self.shape = shape
    if gradient:
        self.gradient = gradient
    else:
        def df(x):
            return numpy.array([
                (self.f(x + df.h[i] * self.dx / 2.) -
                 self.f(x - df.h[i] * self.dx / 2.)) / self.dx
                for i in xrange(shape)
            ])
        df.h = scipy.identity(self.shape)
        self.gradient = df
    if hessian:
        self.hessian = hessian
    else:
        self.hessian = self._approx_hess()
def pcg(A, b, x_0, P=None):
    if P is None:
        P = sp.identity(A.shape[0])
    P_inv = spla.inv(P)
    print('starting')
    r_0 = b - A @ x_0
    r_prev = r = r_0
    r_prod = (r.T @ P_inv @ (r))
    x = x_0
    p = P_inv @ r_0
    k = 0
    while True:
        k += 1
        Ap = A @ p
        alpha = (r_prod / (p.T @ Ap)).item()
        x = x + alpha * p  # x_k initially stores x_{k-1}
        r_prev = r
        r_prev_prod = r_prod
        r = r - alpha * Ap
        r_prod = (r.T @ P_inv @ (r))
        beta = (r_prod / r_prev_prod).item()
        p = P_inv @ (r) + beta * p
        print(k, spla.norm(r))
        if spla.norm(r) <= 10**-12:
            print('terminating from residual')
            break
    print(x)
    print(A @ x)
    return x
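# A hedged usage sketch for pcg, assuming sp/spla are scipy and scipy.linalg
# as the calls above suggest: solve a small SPD system with a Jacobi
# (diagonal) preconditioner.
import numpy as np

A_demo = np.array([[4.0, 1.0], [1.0, 3.0]])
b_demo = np.array([1.0, 2.0])
x0_demo = np.zeros(2)
P_jacobi = np.diag(np.diag(A_demo))  # Jacobi preconditioner: diag(A)
x_demo = pcg(A_demo, b_demo, x0_demo, P=P_jacobi)
print(np.allclose(A_demo @ x_demo, b_demo))  # expect True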
def argmin(self, start=None, tolerance=0.0001, maxit=100, stepsize=1.0):
    xold = start if start is not None else scipy.zeros(self.shape)
    # Initial hessian inverse guess
    B = scipy.identity(self.shape)
    grad = (tolerance + 1) * scipy.ones(self.shape)
    for it in xrange(maxit):
        if (it != 0 and numpy.linalg.norm(grad) < tolerance):
            break
        grad = self.gradient(xold)
        # Search direction
        s = numpy.dot(B, -1 * grad)
        # Use scipy line search until implemented here
        a = scipy.optimize.linesearch.line_search_wolfe2(
            self.f, self.gradient, xold, s, grad)
        s = a[0] * s
        xnew = xold + s
        if numpy.isnan(self.f(xnew)):
            break
        y = self.gradient(xnew) - grad
        ytb = numpy.dot(y, B)
        by = numpy.dot(B, y)
        B = B + numpy.outer(s, s) / numpy.dot(y, s) \
            - numpy.outer(by, ytb) / numpy.dot(ytb, y)
        xold = xnew
    return xnew
def kalman_filter(b, V, Phi, y, X, sigma, Sigma, switch=0,
                  D=None, d=None, G=None, a=None, c=None):
    r"""
    .. math::
       :nowrap:

       \begin{eqnarray*}
       \beta_{t|t-1} &=& \Phi \: \beta_{t-1|t-1}\\
       V_{t|t-1} &=& \Phi V_{t-1|t-1} \Phi^T + \Sigma \\
       e_t &=& y_t - X_t \beta_{t|t-1}\\
       K_t &=& V_{t|t-1} X_t^T (\sigma + X_t V_{t|t-1} X_t)^{-1}\\
       \beta_{t|t} &=& \beta_{t|t-1} + K_t e_t\\
       V_{t|t} &=& (I - K_t X_t^T) V_{t|t-1}\\
       \end{eqnarray*}
    """
    n = scipy.shape(X)[1]
    beta = scipy.empty(scipy.shape(X))
    n = len(b)
    if D is None:
        D = scipy.ones((1, n))
    if d is None:
        d = scipy.matrix(1.)
    if G is None:
        G = scipy.identity(n)
    if a is None:
        a = scipy.zeros((n, 1))
    if c is None:
        c = scipy.ones((n, 1))
#    import code; code.interact(local=locals())
    (b, V) = kalman_predict(b, V, Phi, Sigma)
    for i in xrange(len(X)):
        beta[i] = scipy.array(b).T
        (b, V, e, K) = kalman_upd(b, V, y[i], X[i], sigma, Sigma, switch,
                                  D, d, G, a, c)
        (b, V) = kalman_predict(b, V, Phi, Sigma)
    return beta
def test():
    '''a = mx('1,2,3;0,4,5;9,0,8')
    print a.shape
    print a.I
    print mx.A'''
    a = mx('1,2;3,2')
    b = mx('1,0,0;0,1,1')
    c = mx('1,0;0,1;1,0')
    # print c*(a*b)
    print a.shape[0]
    ai = sp.identity(a.shape[1])
    # aif = ai.flat
    ail = ai.tolist()
    newit = ail[0]
    ail.append(newit)
    print ail  #.repeat(2,1)#.reshape((2,))
    ailm = sp.asmatrix(ail)
    print ailm
    # c = mx('1,2;0,4;9,2')
    # d = mx('1,2,0;4,9,2')
    # print a*b
    # Each time a node is added, build an identity matrix one size larger
    # than A, update Isi += 1, and multiply with A to get the new output
    # matrix
    linec = 0
def decompose(matrix):
    # Returns the decomposition of a matrix A where
    #
    #   Q.A.Q = P.L.U
    #
    # P.L.U is the factoring of Q.A.Q such that L is a lower triangular
    # matrix with 1's on the diagonal and U is an upper triangular matrix;
    # P is the permutation (row-swapping operations) required for this
    # procedure.  The permutation matrix Q is chosen such that the last
    # element of U is its smallest diagonal element.  If A has a zero
    # eigenvalue, then U's last element will be zero.
    dim = matrix.shape[0]

    # first decomposition
    (P, L, U) = lu(matrix)

    # detect the smallest element of U
    smallestIndex = findsmallestdiag(U)
    smallest = U[smallestIndex, smallestIndex]

    #show( matrix, "M" )
    #show( U, "U" )
    #print "Smallest element is %f at %d" % ( smallest, smallestIndex )

    # is the permutation Q not just the identity matrix?
    Q = identity(dim)
    if smallestIndex + 1 != dim:
        # trick: exchange row 'smallestIndex' with row 'dim-1' of the identity matrix
        swaprow(Q, smallestIndex, dim - 1)

    return (P, L, U, Q)
def process_collision_geometry_for_table(self, firsttable, additional_tables=[]):
    table_object = CollisionObject()
    table_object.operation.operation = CollisionObjectOperation.ADD
    table_object.header.frame_id = firsttable.pose.header.frame_id
    table_object.header.stamp = rospy.Time.now()

    #create a box for each table
    for table in [firsttable, ] + additional_tables:
        object = Shape()
        object.type = Shape.BOX
        object.dimensions.append(math.fabs(table.x_max - table.x_min))
        object.dimensions.append(math.fabs(table.y_max - table.y_min))
        object.dimensions.append(0.01)
        table_object.shapes.append(object)

    #set the origin of the table object in the middle of the firsttable
    table_mat = self.pose_to_mat(firsttable.pose.pose)
    table_offset = scipy.matrix([(firsttable.x_min + firsttable.x_max) / 2.0,
                                 (firsttable.y_min + firsttable.y_max) / 2.0,
                                 0.0]).T
    table_offset_mat = scipy.matrix(scipy.identity(4))
    table_offset_mat[0:3, 3] = table_offset
    table_center = table_mat * table_offset_mat
    origin_pose = self.mat_to_pose(table_center)
    table_object.poses.append(origin_pose)

    table_object.id = "table"
    self.object_in_map_pub.publish(table_object)
def DfN(x, N, l):
    # Jacobian for second test case
    J = scipy.identity(N)  # Initialize as N-by-N array
    S = sum(x)  # Compute sum
    for i in range(0, N):  # Assign values
        J[i, :] = J[i, :] + (i + 1.0) * l * math.sin((i + 1.0) * S) * \
            math.exp(l * math.cos((i + 1.0) * S)) * scipy.ones((1, N))
    return J
def generate_gaussians(k):
    """Generate k iid spherical k-dim Gaussians g_1, ..., g_k"""
    mean = [0 for x in range(0, k)]
    covariance = sp.matrix(sp.identity(k), copy=False)
    g = []
    for i in range(0, k):
        tmp = sp.random.multivariate_normal(mean, covariance)
        g.append(tmp)
    return g
def ellipsoid(R=np.array([[2, 0, 0], [0, 1, 0], [0, 0, 1]]),
              position=(0, 0, 0), thetares=20, phires=20,
              color=(0, 0, 1), opacity=1, tessel=0):
    ''' Create an ellipsoid actor.

    Stretch a unit sphere to make it an ellipsoid under a 3x3
    transformation matrix R

    R = sp.array([[2, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]])
    '''
    Mat = sp.identity(4)
    Mat[0:3, 0:3] = R
    '''
    Mat = sp.array([[2, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])
    '''
    mat = vtk.vtkMatrix4x4()
    for i in sp.ndindex(4, 4):
        mat.SetElement(i[0], i[1], Mat[i])

    radius = 1
    sphere = vtk.vtkSphereSource()
    sphere.SetRadius(radius)
    sphere.SetLatLongTessellation(tessel)
    sphere.SetThetaResolution(thetares)
    sphere.SetPhiResolution(phires)

    trans = vtk.vtkTransform()
    trans.Identity()
    #trans.Scale(0.3,0.9,0.2)
    trans.SetMatrix(mat)
    trans.Update()

    transf = vtk.vtkTransformPolyDataFilter()
    transf.SetTransform(trans)
    transf.SetInput(sphere.GetOutput())
    transf.Update()

    spherem = vtk.vtkPolyDataMapper()
    spherem.SetInput(transf.GetOutput())

    spherea = vtk.vtkActor()
    spherea.SetMapper(spherem)
    spherea.SetPosition(position)
    spherea.GetProperty().SetColor(color)
    spherea.GetProperty().SetOpacity(opacity)
    #spherea.GetProperty().SetRepresentationToWireframe()

    return spherea
def get_algebraic_page_rank(transition_matrix, dumping_factor=0.85):
    """Computes the page ranks in an algebraic way"""
    n = transition_matrix.shape[0]
    m_a = transition_matrix
    d = dumping_factor
    ls = la.inv(sp.identity(n) - d * m_a)
    rs = sp.matrix([(1 - d) / n] * n).reshape(n, 1)
    return sp.dot(ls, rs)
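# A hedged usage sketch for get_algebraic_page_rank, assuming this module's
# la is numpy.linalg or scipy.linalg and sp is scipy, matching the calls
# above: rank a tiny 3-page link graph. Column j of the transition matrix
# spreads page j's score over its out-links.
import scipy as sp

M_demo = sp.matrix([[0.0, 0.0, 1.0],
                    [0.5, 0.0, 0.0],
                    [0.5, 1.0, 0.0]])  # column-stochastic link matrix
ranks = sp.asarray(get_algebraic_page_rank(M_demo))
print(ranks)  # one score per page; for a column-stochastic M they sum to 1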
def get_transform(tf_listener, frame1, frame2):
    temp_header = Header()
    temp_header.frame_id = frame1
    temp_header.stamp = rospy.Time(0)
    try:
        frame1_to_frame2 = tf_listener.asMatrix(frame2, temp_header)
    except:
        rospy.logerr("tf transform was not there between %s and %s" % (frame1, frame2))
        return scipy.matrix(scipy.identity(4))
    return scipy.matrix(frame1_to_frame2)
def kalman_upd(beta, V, y, X, s, S, switch=0, D=None, d=None,
               G=None, a=None, b=None):
    r"""
    This is the update step of kalman filter.

    .. math::
       :nowrap:

       \begin{eqnarray*}
       e_t &=& y_t - X_t \beta_{t|t-1} \\
       K_t &=& V_{t|t-1} X_t^T (\sigma + X_t V_{t|t-1} X_t)^{-1}\\
       \beta_{t|t} &=& \beta_{t|t-1} + K_t e_t\\
       V_{t|t} &=& (I - K_t X_t^T) V_{t|t-1}\\
       \end{eqnarray*}
    """
    e = y - X * beta
    K = V * X.T * (s + X * V * X.T).I
    beta = beta + K * e
    if switch == 1:
        D = scipy.matrix(D)
        d = scipy.matrix(d)
        if DEBUG:
            print "beta: ", beta
        beta = beta - S * D.T * (D * S * D.T).I * (D * beta - d)
        if DEBUG:
            print "beta: ", beta
    elif switch == 2:
        G = scipy.matrix(G)
        a = scipy.matrix(a)
        b = scipy.matrix(b)
        n = len(beta)
        P = 2 * V.I
        q = -2 * V.I.T * beta
        bigG = scipy.empty((2 * n, n))
        h = scipy.empty((2 * n, 1))
        bigG[:n, :] = -G
        bigG[n:, :] = G
        h[:n, :] = -a
        h[n:, :] = b
        paraset = map(cvxopt.matrix, (P, q, bigG, h, D, d))
        beta = qp(*paraset)['x']
    temp = K * X
    V = (scipy.identity(temp.shape[0]) - temp) * V
    return (beta, V, e, K)
def my_matrixinv(mat):
    width = mat.shape[0]
    det = np.linalg.det(mat)
    if det == 0:
        print "det = 0; matrix not invertible"
    identity = sp.identity(width)
    inv = sp.linalg.solve(mat, identity)
    return inv
def __init__(self, respond=None, regressors=None, intercept=False,
             lamb=1., W1=None, W2=None, Phi=None, D=None,
             d=scipy.matrix(1.00), G=None, a=None, b=None):
    """Input: paras where they are expected to be tuple or dictionary"""
    ICRegression.__init__(self, respond, regressors, intercept, D, d, G, a, b)
    if W1 is not None:
        self.W1 = W1
    else:
        self.W1 = 1.
    if W2 is None:
        self.W2 = scipy.identity(self.n)
    else:
        self.W2 = W2
    if Phi is None:
        self.Phi = scipy.identity(self.n)
    else:
        self.Phi = Phi
    self.lamb = lamb
def __init__(self, R, size):
    # cheat for now, use expm
    self.R = R
    w, vr = linalg.eig(R)
    self.size = size
    self.Eigvals = scipy.array(w, dtype=scipy.float64)
    self.S = scipy.identity(self.size)
    self.T = vr
    self.T_inv = linalg.inv(vr)
    self.Q_of_t_cache = {}  # t --> Q(t)
def argmin(self, start=None, tolerance=0.0001, maxit=100, call=None):
    """
    Find a minimum

    start: starting point, default 0
    tolerance: break when ||gradient|| is less than
    maxit: iteration limit
    call: function to call at end of each iteration, is passed locals()
          as argument
    """
    xold = start if start is not None else scipy.zeros(self.shape)
    B = scipy.identity(self.shape)
    for it in xrange(maxit):
        grad = self.gradient(xold)
        if (it != 0 and numpy.linalg.norm(grad) < tolerance):
            break
        s = numpy.dot(B, -1 * grad)
        # Use scipy line search until implemented here
        a = scipy.optimize.linesearch.line_search_wolfe2(
            self.f, self.gradient, xold, s, grad)
        s = a[0] * s
        xnew = xold + s
        # Break on nan
        if numpy.isnan(xnew).any():
            xnew = xold
            break
        y = self.gradient(xnew) - grad
        # Update inverse hessian approximation
        # Using Sherman-Morrison updating
        ytb = numpy.dot(y, B)
        ys = numpy.dot(s, y)
        ss = numpy.outer(s, s)
        by = numpy.dot(B, y)
        bys = numpy.outer(by, s)
        B = B + (1 + numpy.dot(ytb, y) / ys) * ss / ys - (bys + numpy.transpose(bys)) / ys
        xold = xnew
        if call:
            call(locals())
    return xnew
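# A hedged usage sketch for the BFGS argmin above. It assumes the method
# lives on the optimizer class whose __init__(f, shape, ...) appears earlier
# in this collection; the class name Optimizer is hypothetical.
import scipy

def f_demo(x):
    return (x[0] - 1.0)**2 + 10.0 * (x[1] + 2.0)**2

opt = Optimizer(f_demo, 2)  # hypothetical class; numerical gradient by default
x_min = opt.argmin(start=scipy.zeros(2))
print(x_min)  # expect something close to [1, -2]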
def __init__(self, A=_A, H=_H, R=_R, Q=_Q):
    dim = A.shape[0]
    self.A = A  # Transition matrix
    self.H = H  # Extraction matrix
    self.R = R  # Covariance matrix, measurement noise
    self.Q = Q  # Covariance matrix, process noise
    self.x_mu_prior = sp.zeros([dim, 1])
    self.x_mu = sp.zeros([dim, 1])
    self.P_prior = sp.zeros([dim, dim])
    self.P = sp.zeros([dim, dim])
    self.P[-1][-1] = .001
    self.I = sp.identity(dim)
def calcDistanceMatrix(nDimPoints,
                       distFunc=lambda deltaPoint: sqrt(
                           sum(deltaPoint[d]**2 for d in xrange(len(deltaPoint))))):
    nDimPoints = array(nDimPoints)
    dim = len(nDimPoints[0])
    delta = [None] * dim
    for d in xrange(dim):
        data = nDimPoints[:, d]
        delta[d] = data - reshape(data, (len(data), 1))  # computes all possible combinations
    dist = distFunc(delta)
    dist = dist + identity(len(data)) * dist.max()  # eliminate self matching
    # dist is the matrix of distances from one coordinate to any other
    return dist
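# A hedged usage sketch for calcDistanceMatrix, assuming sqrt, sum, array,
# reshape and identity come from a `from scipy import *`-style import as the
# body suggests: pairwise Euclidean distances between three 2-D points, with
# the diagonal pushed to the maximum so a point never matches itself.
points_demo = [[0.0, 0.0], [3.0, 4.0], [0.0, 1.0]]
dist_demo = calcDistanceMatrix(points_demo)
print(dist_demo[0, 1])  # expect 5.0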
def diffmat(x): """Compute the differentiation matrix for x is an ordered array of grid points. Uses barycentric formulas for stability. """ n = sp.size(x) e = sp.ones((n,1)) Xdiff = sp.outer(x,e)-sp.outer(e,x)+sp.identity(n) xprod = -reduce(mul,Xdiff) # product of rows W = sp.outer(1/xprod,e) D = W/sp.multiply(W.T,Xdiff) d = 1-sum(D) for k in range(0,n): # Set diagonal elements D[k,k] = d[k] return -D.T
def generate_data(N):
    '''
    Generate N data points from a 2D Gaussian distribution with mean [1, 2]

    Usage:    x = generate_data(N)
    Returns:  x : a 2xN array

    Instructions: Use sp.random.multivariate_normal
    '''
    mean = [1, 2]
    cov = sp.identity(2)
    return sp.random.multivariate_normal(mean, cov, (N)).T