def get_rotation_matrix(*, start_point: np.ndarray, end_point: typing.Any) -> np.ndarray:
    if type(end_point) is tuple:
        end_point = np.array(end_point, np.float32)
    if np.array_equal(start_point, end_point):
        return np.identity(3, np.float32)
    if np.array_equal(start_point, -end_point):
        if start_point[0] == 1.0:
            return util.transforms.rotate(180, (0, 0, 1))[:3, :3]
        else:
            return util.transforms.rotate(180, (1, 0, 0))[:3, :3]
    else:
        x = np.cross(start_point, end_point)
        x = np.divide(x, np.linalg.norm(x))
        start_point_length = np.linalg.norm(start_point)
        end_point_length = np.linalg.norm(end_point)
        cos_theta = np.dot(start_point, end_point) / (start_point_length * end_point_length)
        theta = math.acos(cos_theta)
        A = np.array([[    0, -x[2],  x[1]],
                      [ x[2],     0, -x[0]],
                      [-x[1],  x[0],     0]], np.float32)
        rotate_matrix = np.identity(3, np.float32)
        rotate_matrix += A * math.sin(theta)
        rotate_matrix += np.dot(A, A) * (1 - math.cos(theta))
        if x[2] <= 0:
            rotate_matrix = np.dot(rotate_matrix,
                                   util.transforms.rotate(180, (0, 0, 1))[:3, :3])
        return rotate_matrix
def __init__(self, featureDimension, lambda_, eta_, userNum, windowSize=20):
    self.windowSize = windowSize
    self.counter = 0
    self.userNum = userNum
    self.lambda_ = lambda_
    # Basic statistics for estimating Theta
    self.A = lambda_ * np.identity(n=featureDimension * userNum)
    self.b = np.zeros(featureDimension * userNum)
    self.UserTheta = np.zeros(shape=(featureDimension, userNum))
    #self.UserTheta = np.random.random((featureDimension, userNum))
    self.AInv = np.linalg.inv(self.A)

    #self.W = np.random.random((userNum, userNum))
    self.W = np.identity(n=userNum)
    self.Wlong = vectorize(self.W)
    self.batchGradient = np.zeros(userNum * userNum)

    self.CoTheta = np.dot(self.UserTheta, self.W)
    self.BigW = np.kron(np.transpose(self.W), np.identity(n=featureDimension))
    self.CCA = np.identity(n=featureDimension * userNum)
    self.BigTheta = np.kron(np.identity(n=userNum), self.UserTheta)
    self.W_X_arr = []
    self.W_y_arr = []
    for i in range(userNum):
        self.W_X_arr.append([])
        self.W_y_arr.append([])
def __init__(self):
    self._position = numpy.zeros((2,))
    self._position_frozen = False
    self._matrix = numpy.matrix(numpy.identity(3, numpy.float64))
    self._temp_matrix = numpy.matrix(numpy.identity(3, numpy.float64))
    self._selected = False
    self._scene = None
def Energy_condensate_full(Q, F1, x, y, H, mu, kappa, Ns):
    if Q == 0 and F1 == 0:
        return 1e14
    m = find_minimum(Q, F1, mu, kappa, Ns)
    if m[0] < H / 2:
        return 1e14
    result = 0
    for n1 in range(Ns):
        for n2 in range(Ns):
            M, dim = HamiltonianMatrix(n1, n2, Q, F1, 0, H, mu, kappa, Ns, 'T1')
            B = np.identity(dim)
            # dim / 2 is a float in Python 3 and cannot be used as an index;
            # use integer division.
            B[dim // 2:dim, dim // 2:dim] = -np.identity(dim // 2)
            eig = np.absolute(np.real(lin.eigvals(np.dot(B, M))))
            result += sum(eig) / 2
            vec = [x[Ns * n1 + n2],
                   np.conjugate(y[Ns * ((Ns - n1) % Ns) + (Ns - n2) % Ns])]
            result += np.dot(vec, np.dot(np.conj(vec).T, M))
    return (result - 3 * Ns ** 2 * (np.abs(F1) ** 2 - np.abs(Q) ** 2) / 2
            - Ns ** 2 * mu * (1. + kappa) + Ns * H)
def updateParameters(self, articlePicked, click, userID):
    self.counter += 1
    self.Wlong = vectorize(self.W)
    featureDimension = len(articlePicked.featureVector)
    T_X = vectorize(np.outer(articlePicked.featureVector, self.W.T[userID]))
    self.A += np.outer(T_X, T_X)
    self.b += click * T_X
    self.AInv = np.linalg.inv(self.A)

    self.UserTheta = matrixize(np.dot(self.AInv, self.b), len(articlePicked.featureVector))
    Xi_Matrix = np.zeros(shape=(featureDimension, self.userNum))
    Xi_Matrix.T[userID] = articlePicked.featureVector
    W_X = vectorize(np.dot(np.transpose(self.UserTheta), Xi_Matrix))
    self.batchGradient += evaluateGradient(W_X, click, self.Wlong, self.lambda_, self.regu)

    if self.counter % self.windowSize == 0:
        self.Wlong -= 1 / (float(self.counter / self.windowSize) + 1) * self.batchGradient
        self.W = matrixize(self.Wlong, self.userNum)
        self.W = normalize(self.W, axis=0, norm='l1')
        #print 'SVD', self.W
        self.batchGradient = np.zeros(self.userNum * self.userNum)
        # Use Ridge regression to fit W
    '''
    plt.pcolor(self.W_b)
    plt.colorbar
    plt.show()
    '''
    # Elementwise range check. Note the original `w.any() < 0` compared a
    # boolean with 0 and was therefore always False; `(w < 0).any()` is the
    # intended test.
    if (self.W.T[userID] < 0).any() or (self.W.T[userID] > 1).any():
        print(self.W.T[userID])

    self.CoTheta = np.dot(self.UserTheta, self.W)
    self.BigW = np.kron(np.transpose(self.W), np.identity(n=len(articlePicked.featureVector)))
    self.CCA = np.dot(np.dot(self.BigW, self.AInv), np.transpose(self.BigW))
    self.BigTheta = np.kron(np.identity(n=self.userNum), self.UserTheta)
def reInitilize(self):
    d = np.shape(self.A)[0]
    # theta is re-initialized, so the associated statistics are reset to zero
    self.A = np.identity(n=d)
    self.b = np.zeros(d)
    self.A_inv = np.identity(n=d)
    self.theta = np.zeros(d)
    self.pta = 0
def test_linear_2p2s_with_dof_at_1_correlation_matrix(self):
    dof_index = 0
    output_index = 0

    model_instance = dict(models.model_data.model_structure)
    model_instance["parameters"] = numpy.array([2.0, 4.0])
    model_instance["inputs"] = numpy.array([[1.0, 10], [2.0, 20], [3.0, 30]])

    problem_instance = dict(models.model_data.problem_structure)
    problem_instance["output_indices"] = [output_index]
    problem_instance["inputs"] = model_instance["inputs"]
    problem_instance["parameters"] = [model_instance["parameters"][dof_index]]
    problem_instance["parameter_indices"] = [dof_index]

    dof = [1.0]
    # one standard deviation offset
    offset = -1
    measured = numpy.array([[1.0], [2.0], [3.0]]) + offset
    problem_instance["outputs"] = measured

    sens = [J_linear_2p2s([dof[0], model_instance["parameters"][1]], x)
            for x in model_instance["inputs"]]

    # identity covariance matrix of observation errors
    cov_obs_errs = numpy.identity(3)
    actual = metrics.confidence_measures.compute_covariance_matrix(sens, cov_obs_errs)
    expected = numpy.array([[1**2 + 2**2 + 3**2, 0], [0, 0]])
    [[self.assertEqual(act, exp) for act, exp in zip(acts, exps)]
     for acts, exps in zip(actual, expected)]

    # diagonal covariance matrix of observation errors
    multiplier = 2
    cov_obs_errs = numpy.identity(3) * multiplier
    actual = metrics.confidence_measures.compute_covariance_matrix(sens, cov_obs_errs)
    expected = numpy.array([[(1**2 + 2**2 + 3**2) / multiplier, 0], [0, 0]])
    [[self.assertEqual(act, exp) for act, exp in zip(acts, exps)]
     for acts, exps in zip(actual, expected)]
def P(self, debug=False, mu=np.identity(6), g=9.81, n=1, N=0, N_=np.infty,
      a=np.ones(1), b=np.zeros(1), hip=np.zeros((1, 3)),
      foot=np.nan * np.ones((1, 3)), lamb=1. * np.ones(1),
      kapp=1e3 * np.ones(1), psi=np.zeros(1), zeta=np.zeros(1)):
    """
    Parameters:
      mu        - 6x6    - SE(3) inertia tensor
      g         - scalar - gravitational constant
      n         - int    - number of legs
      N         - int    - step count; negative indicates descent;
                           zero disables count
      a         - 1 x n  - binary vector indicating leg active
      b         - 1 x n  - binary vector indicating leg stance
      hip       - n x 3  - hip locations in body frame
      foot      - n x 3  - foot stance locations in world frame
      lamb      - 1 x n  - leg lengths
      kapp      - 1 x n  - leg stiffness
      psi, zeta - 1 x n  - leg touchdown angle is
                           dot( expm(zeta*skew(e_z)), expm(psi*skew(e_y)) )
      debug     - bool   - flag for printing debugging info
    """
    ex, ey, ez = np.identity(3)
    # TODO: sanitize inputs (make np.array, broadcast to correct size)
    return Struct(j=None, debug=debug, mu=mu, muinv=np.linalg.inv(mu),
                  g=g, n=n, N=N, N_=N_, a=a, b=b,
                  hip=hip, foot=foot, lamb=lamb, kapp=kapp, psi=psi, zeta=zeta,
                  ex=ex, ey=ey, ez=ez)
def maximum_muscle_force(OptimalLengths, Angle, AngularVelocity, R, OptimalForces):
    """
    OptimalLengths must be a 1x4 matrix with optimal muscle lengths for 4
    muscles. Angle should be the current angle. AngularVelocity should be the
    current angular velocity. R is the 1x4 moment arm matrix. OptimalForces
    should be a 4x1 matrix of forces produced by each muscle when at optimal
    length and velocity.
    """
    # Force-length considerations. We subtract one from every normalized
    # muscle length to find the percentage above or below the optimal length.
    CurrentMuscleLengths = OptimalLengths.T - R.T * Angle
    NormalizedMuscleLengths = np.matrix(
        [(CurrentMuscleLengths[i, 0] / OptimalLengths.T[i, 0]) - 1
         for i in range(4)])
    MaximumMuscleForce_FL = np.identity(4) * [
        force_length_curve(NormalizedMuscleLengths[0, i])
        + passive_force_length_curve(NormalizedMuscleLengths[0, i])
        for i in range(4)]

    # Force-velocity considerations
    CurrentMuscleVelocity = -R.T * AngularVelocity
    NormalizedMuscleVelocity = [CurrentMuscleVelocity[i, 0] / OptimalLengths.T[i, 0]
                                for i in range(4)]
    MaximumMuscleForce_FV = np.identity(4) * [
        force_velocity_curve(NormalizedMuscleLengths[0, i], NormalizedMuscleVelocity[i])
        for i in range(4)]

    MaximumMuscleForce = (MaximumMuscleForce_FL + MaximumMuscleForce_FV) * [
        OptimalForces[0, i] for i in range(4)]
    return MaximumMuscleForce
def Slv(self, x0, *args):
    self._args = args[0]
    self._N = len(x0)
    Idn = np.identity(self._N)    # identity matrix
    x = x0                        # initial value; x is an N x 1 column vector
    hesse = np.identity(self._N)  # approximate Hessian matrix
    g2 = self.Gradient(x)         # gradient (helper method), shape N x 1
    # k = []
    for i in range(self._max):
        g1 = g2                   # shape N x 1
        print(self.Length(g1))
        if self.Length(g1) < self._eps:
            break                 # converged
        p = np.dot(-1 * hesse, g1.T)  # search direction, N x 1 column vector
        k = self.Gold(x, p)       # step-size update (helper method) via the
                                  # revised Newton method; k is a scalar
        s = p * k                 # step, N x 1 column vector
        x += s                    # x is updated here
        x[x < 1e-4] = 1e-4
        x[x > 1e2] = 1e2

        # Update the Hessian approximation
        g2 = self.Gradient(x)
        y = g2 - g1               # gradient difference, N x 1
        yt = y.T                  # 1 x N
        st = s.T                  # 1 x N
        z = np.dot(st, y)
        if z == 0:
            break                 # whether converged

        # BFGS formula: update the approximate Hessian
        hesse = (Idn - s * yt / z) * hesse * (Idn - y * st / z) + s * st / z

    # Return x and the objective value
    return [x, self._func(x, self._args)]
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return 4x4 transformation matrix from sequence of transformations.

    Code modified from the work of Christoph Gohlke, link provided here:
    http://www.lfd.uci.edu/~gohlke/code/transformations.py.html

    This is the inverse of the decompose_matrix function.

    Parameters
    ----------
    scale : vector of 3 scaling factors
    shear : list of shear factors for x-y, x-z, y-z axes
    angles : list of Euler angles about static x, y, z axes
    translate : translation vector along x, y, z axes
    perspective : perspective partition of matrix

    Returns
    -------
    matrix : 4x4 array

    Examples
    --------
    >>> import math
    >>> import numpy as np
    >>> import dipy.core.geometry as gm
    >>> scale = np.random.random(3) - 0.5
    >>> shear = np.random.random(3) - 0.5
    >>> angles = (np.random.random(3) - 0.5) * (2*math.pi)
    >>> trans = np.random.random(3) - 0.5
    >>> persp = np.random.random(4) - 0.5
    >>> M0 = gm.compose_matrix(scale, shear, angles, trans, persp)
    """
    M = np.identity(4)
    if perspective is not None:
        P = np.identity(4)
        P[3, :] = perspective[:4]
        M = np.dot(M, P)
    if translate is not None:
        T = np.identity(4)
        T[:3, 3] = translate[:3]
        M = np.dot(M, T)
    if angles is not None:
        R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
        M = np.dot(M, R)
    if shear is not None:
        Z = np.identity(4)
        Z[1, 2] = shear[2]
        Z[0, 2] = shear[1]
        Z[0, 1] = shear[0]
        M = np.dot(M, Z)
    if scale is not None:
        S = np.identity(4)
        S[0, 0] = scale[0]
        S[1, 1] = scale[1]
        S[2, 2] = scale[2]
        M = np.dot(M, S)
    M /= M[3, 3]
    return M
def __init__(self, population, sigma, **params):
    self.parents = population
    self.dim = len(self.parents[0])

    # Selection
    self.mu = params.get("mu", len(self.parents))
    self.lambda_ = params.get("lambda_", 1)

    # Step size control
    self.d = params.get("d", 1.0 + self.dim / 2.0)
    self.ptarg = params.get("ptarg", 1.0 / (5.0 + 0.5))
    self.cp = params.get("cp", self.ptarg / (2.0 + self.ptarg))

    # Covariance matrix adaptation
    self.cc = params.get("cc", 2.0 / (self.dim + 2.0))
    self.ccov = params.get("ccov", 2.0 / (self.dim ** 2 + 6.0))
    self.pthresh = params.get("pthresh", 0.44)

    # Internal parameters associated to the mu parent
    self.sigmas = [sigma] * len(population)
    # Lower Cholesky matrix (sampling matrix)
    self.A = [numpy.identity(self.dim) for _ in range(len(population))]
    # Inverse Cholesky matrix (used in the update of A)
    self.invCholesky = [numpy.identity(self.dim) for _ in range(len(population))]
    self.pc = [numpy.zeros(self.dim) for _ in range(len(population))]
    self.psucc = [self.ptarg] * len(population)

    self.indicator = params.get("indicator", tools.hypervolume)
def rotation(ntheta):
    '''Find rotation matrix from axis-angle vector.'''
    theta = la.norm(ntheta)
    if theta == 0:
        return np.identity(3)
    Q = dual(ntheta / theta)
    return np.identity(3) - Q * math.sin(theta) + dot(Q, Q) * (1 - math.cos(theta))
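A quick sanity check for rotation (hypothetical usage, assuming dual returns the skew-symmetric cross-product matrix of its argument): whatever the sign convention, the result should be orthogonal with determinant 1.

import numpy as np

R = rotation(np.array([0.0, 0.0, np.pi / 2]))  # 90 degrees about the z-axis
assert np.allclose(R @ R.T, np.identity(3))    # orthogonality: R R^T == I
assert np.isclose(np.linalg.det(R), 1.0)       # proper rotation, no reflection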
def Hfs(L, S, I):
    """Provides the L dot S matrix (fine structure)"""
    gS = int(2 * S + 1)  # number of mS values
    Sx = jx(S)
    Sy = jy(S)
    Sz = jz(S)
    Si = identity(gS)
    gL = int(2 * L + 1)
    Lx = jx(L)
    Ly = jy(L)
    Lz = jz(L)
    Li = identity(gL)
    gJ = gL * gS
    Jx = kron(Lx, Si) + kron(Li, Sx)
    Jy = kron(Ly, Si) + kron(Li, Sy)
    Jz = kron(Lz, Si) + kron(Li, Sz)
    J2 = dot(Jx, Jx) + dot(Jy, Jy) + dot(Jz, Jz)
    gI = int(2 * I + 1)
    Ii = identity(gI)
    gF = gJ * gI
    Fi = identity(gF)
    # fine structure in the m_L, m_S, m_I basis
    Hfs = 0.5 * (kron(J2, Ii) - L * (L + 1) * Fi - S * (S + 1) * Fi)
    return Hfs
def Hhfs(L, S, I):
    """Provides the I dot J matrix (magnetic dipole interaction)"""
    gS = int(2 * S + 1)
    Sx = jx(S)
    Sy = jy(S)
    Sz = jz(S)
    Si = identity(gS)
    gL = int(2 * L + 1)
    Lx = jx(L)
    Ly = jy(L)
    Lz = jz(L)
    Li = identity(gL)
    gJ = gL * gS
    Jx = kron(Lx, Si) + kron(Li, Sx)
    Jy = kron(Ly, Si) + kron(Li, Sy)
    Jz = kron(Lz, Si) + kron(Li, Sz)
    Ji = identity(gJ)
    J2 = dot(Jx, Jx) + dot(Jy, Jy) + dot(Jz, Jz)
    gI = int(2 * I + 1)
    gF = gJ * gI
    Ix = jx(I)
    Iy = jy(I)
    Iz = jz(I)
    Ii = identity(gI)
    Fx = kron(Jx, Ii) + kron(Ji, Ix)
    Fy = kron(Jy, Ii) + kron(Ji, Iy)
    Fz = kron(Jz, Ii) + kron(Ji, Iz)
    Fi = identity(gF)
    F2 = dot(Fx, Fx) + dot(Fy, Fy) + dot(Fz, Fz)
    Hhfs = 0.5 * (F2 - I * (I + 1) * Fi - kron(J2, Ii))
    return Hhfs
def rotne_prager_tensor_loops(r_vectors, eta, a, *args, **kwargs):
    '''
    Calculate the free Rotne-Prager tensor for particles at locations given
    by r_vectors (list of 3-dimensional locations) of radius a.
    '''
    num_particles = len(r_vectors)
    fluid_mobility = np.array([np.zeros(3 * num_particles)
                               for _ in range(3 * num_particles)])
    for j in range(num_particles):
        for k in range(num_particles):
            if j != k:
                # Particle interaction, Rotne-Prager.
                r = r_vectors[j] - r_vectors[k]
                r_norm = np.linalg.norm(r)
                if r_norm > 2. * a:
                    # Constants for the far RPY tensor, taken from the
                    # OverdampedIB paper.
                    C1 = 3. * a / (4. * r_norm) + (a ** 3) / (2. * r_norm ** 3)
                    C2 = 3. * a / (4. * r_norm) - (3. * a ** 3) / (2. * r_norm ** 3)
                elif r_norm <= 2. * a:
                    # This is for the close interaction;
                    # call C3 -> C1 and C4 -> C2.
                    C1 = 1 - 9. * r_norm / (32. * a)
                    C2 = 3 * r_norm / (32. * a)
                fluid_mobility[(j * 3):(j * 3 + 3), (k * 3):(k * 3 + 3)] = (
                    1. / (6. * np.pi * eta * a) * (
                        C1 * np.identity(3)
                        + C2 * np.outer(r, r)
                        / (np.maximum(r_norm, np.finfo(float).eps) ** 2)))
            elif j == k:
                # j == k, diagonal block.
                fluid_mobility[(j * 3):(j * 3 + 3), (k * 3):(k * 3 + 3)] = (
                    (1. / (6. * np.pi * eta * a)) * np.identity(3))
    return fluid_mobility
def _get_H(self, debug=False):
    """
    Returns H_t as defined in algorithm 2.

    Reference:
    https://en.wikipedia.org/wiki/Limited-memory_BFGS
    http://www.ccms.or.kr/data/pdfpaper/jcms21_1/21_1_117.pdf
    https://homes.cs.washington.edu/~galen/files/quasi-newton-notes.pdf
    """
    I = np.identity(len(self.w))
    if min(len(self.s), len(self.y)) == 0:
        print("Warning: No second order information used!")
        return I

    assert len(self.s) > 0, "s cannot be empty."
    assert len(self.s) == len(self.y), "s and y must have same length"
    assert self.s[0].shape == self.y[0].shape, \
        "s and y must have same shape"
    assert abs(self.y[-1]).sum() != 0, "latest y entry cannot be 0!"
    assert 1 / np.inner(self.y[-1], self.s[-1]) != 0, "!"

    I = np.identity(len(self.s[0]))
    H = np.dot((np.inner(self.s[-1], self.y[-1])
                / np.inner(self.y[-1], self.y[-1])), I)

    # itertools.izip is Python 2 only; the built-in zip is lazy in Python 3.
    for (s_j, y_j) in zip(self.s, self.y):
        rho = 1.0 / np.inner(y_j, s_j)
        V = I - np.multiply(rho, np.outer(s_j, y_j))
        H = (V).dot(H).dot(V.T)
        H += np.multiply(rho, np.outer(s_j, s_j))

    return H
def __init__(self, xdims, udims, T, A1=None, B1=None, A2=None, B2=None,
             stoch=False):
    self.xdims = xdims
    self.udims = udims
    self.T = T
    self.t = 0
    self.robot = None
    if A1 is None:
        A1 = np.zeros((self.xdims, self.xdims))
    if A2 is None:
        A2 = np.zeros((self.xdims, self.xdims))
    if B1 is None:
        B1 = np.zeros((self.xdims, self.udims))
    if B2 is None:
        B2 = np.zeros((self.xdims, self.udims))
    self.A1 = A1
    self.A2 = A2
    self.B1 = B1
    self.B2 = B2
    self.stoch = stoch
    self.mean = np.zeros(self.xdims)
    self.cov_init = np.identity(self.xdims) * 20
    self.cov = np.identity(self.xdims) * .1
def __init__(self, population, sigma, params={}):
    # Create a centroid individual
    self.centroid = copy.deepcopy(population[0])
    # Clear its content
    self.centroid[:] = self.centroid[0:0]
    # The centroid is used in new individual creation
    self.centroid.extend(numpy.mean(population, 0))

    self.dim = len(self.centroid)
    self.sigma = sigma
    self.pc = numpy.zeros(self.dim)
    self.ps = numpy.zeros(self.dim)
    self.chiN = math.sqrt(self.dim) * (1 - 1. / (4. * self.dim) +
                                       1. / (21. * self.dim ** 2))

    self.B = numpy.identity(self.dim)
    self.C = numpy.identity(self.dim)
    self.diagD = numpy.ones(self.dim)
    self.BD = self.B * self.diagD

    self.lambda_ = params.get("lambda_", int(4 + 3 * math.log(self.dim)))

    self.update_count = 0
    self.params = params
    self.computeParams(self.params)
def matrix_exp(M, ntaylor=20, nsquare=10):
    """Computes the exponential of a square matrix via a Taylor series.

    Calculates the matrix exponential by first calculating exp(M/(2**nsquare)),
    then squaring the result the appropriate number of times.

    Args:
        M: Matrix to be exponentiated.
        ntaylor: Optional integer giving the number of terms in the Taylor
            series. Defaults to 20.
        nsquare: Optional integer giving how many times the original matrix
            will be halved. Defaults to 10.

    Returns:
        The matrix exponential of M.
    """
    n = M.shape[1]
    tc = np.zeros(ntaylor + 1)
    tc[0] = 1.0
    for i in range(ntaylor):
        tc[i + 1] = tc[i] / (i + 1)

    SM = np.copy(M) / 2.0 ** nsquare

    EM = np.identity(n, float) * tc[ntaylor]
    for i in range(ntaylor - 1, -1, -1):
        EM = np.dot(SM, EM)
        EM += np.identity(n) * tc[i]

    for i in range(nsquare):
        EM = np.dot(EM, EM)

    return EM
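A sanity check for matrix_exp, sketched under the assumption that SciPy is available: scipy.linalg.expm serves as a reference implementation, and exp(0) must equal the identity.

import numpy as np
from scipy.linalg import expm  # reference implementation

M = 0.5 * np.random.randn(4, 4)
assert np.allclose(matrix_exp(M), expm(M), atol=1e-8)
assert np.allclose(matrix_exp(np.zeros((3, 3))), np.identity(3))  # exp(0) == I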
def __init__(self):
    dt = 0.005

    # State-transition model
    self.A = np.array([
        [1, 0, dt, 0],
        [0, 1, 0, dt],
        [0, 0, 1, 0],
        [0, 0, 0, 1]
    ])

    # Observation model
    self.H = np.array([[1, 0, 0, 0],
                       [0, 1, 0, 0]])

    # Process/state noise
    vel_noise_std = 0.005
    pos_noise_std = 0.005
    self.Q = np.array([
        [pos_noise_std * pos_noise_std, 0, 0, 0],
        [0, pos_noise_std * pos_noise_std, 0, 0],
        [0, 0, vel_noise_std * vel_noise_std, 0],
        [0, 0, 0, vel_noise_std * vel_noise_std]
    ])

    # Sensor/measurement noise
    measurement_noise_std = 0.5
    self.R = measurement_noise_std * measurement_noise_std * np.identity(2)

    self.x = np.zeros((4, 1))        # initial state vector [x, y, vx, vy]
    self.sigma = 9 * np.identity(4)  # initial covariance matrix
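The matrices above are the standard ingredients of a linear Kalman filter. For context, a minimal predict/update pair consistent with these attributes might look like the following sketch (hypothetical methods, not part of the original class):

def predict(self):
    # x' = A x,  P' = A P A^T + Q
    self.x = self.A @ self.x
    self.sigma = self.A @ self.sigma @ self.A.T + self.Q

def update(self, z):
    # innovation y = z - H x, gain K = P H^T (H P H^T + R)^-1
    y = z - self.H @ self.x
    S = self.H @ self.sigma @ self.H.T + self.R
    K = self.sigma @ self.H.T @ np.linalg.inv(S)
    self.x = self.x + K @ y
    self.sigma = (np.identity(4) - K @ self.H) @ self.sigma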
def test_symm_algorithm_equivalence():
    """Test different stabilization methods in the computation of modes,
    in the presence and/or absence of the discrete symmetries."""
    np.random.seed(400)
    for n in (12, 20, 40):
        for sym in kwant.rmt.sym_list:
            # Random onsite and hopping matrices in symmetry class
            h_cell = kwant.rmt.gaussian(n, sym)
            # Hopping is an offdiagonal block of a Hamiltonian. We rescale it
            # to ensure that there are modes at the Fermi level.
            h_hop = 10 * kwant.rmt.gaussian(2 * n, sym)[:n, n:]

            if kwant.rmt.p(sym):
                p_mat = np.array(kwant.rmt.h_p_matrix[sym])
                p_mat = np.kron(np.identity(n // len(p_mat)), p_mat)
            else:
                p_mat = None

            if kwant.rmt.t(sym):
                t_mat = np.array(kwant.rmt.h_t_matrix[sym])
                t_mat = np.kron(np.identity(n // len(t_mat)), t_mat)
            else:
                t_mat = None

            if kwant.rmt.c(sym):
                c_mat = np.kron(np.identity(n // 2), np.diag([1, -1]))
            else:
                c_mat = None

            check_equivalence(h_cell, h_hop, n, sym=sym, particle_hole=p_mat,
                              chiral=c_mat, time_reversal=t_mat)
def on_key_down(self, keysym):
    # Control simulation
    if keysym.sym == sdl.SDLK_q:
        self.exit_()
    if keysym.sym == sdl.SDLK_r:
        self.reset()
    if keysym.sym == sdl.SDLK_p:
        self.runSimulation = not self.runSimulation
        print('-' * 40)
        print("Run Simulation: ", self.runSimulation)
    if keysym.sym == sdl.SDLK_h:
        self.showHelp()
    if keysym.sym == sdl.SDLK_g:
        if self.worldView == 0:
            self.worldView = 1
            self.renderer.setProjection(self.sim.width, self.sim.height)
            self.renderer.matModelView = np.identity(4, 'f')
            self.renderer.matView = np.identity(4, 'f')
        elif self.worldView == 1:
            self.worldView = 0
            width, height = self.userController.agent.fov
            self.renderer.setProjection(width, height)
        self.renderer.worldView = self.worldView
    if keysym.sym == sdl.SDLK_f:
        self.frameStats.show()
    if keysym.sym == sdl.SDLK_i:
        # use for tests
        pass

    # Control agent
    self.userController.keyDown(keysym.sym)
def test_cell_triclinic():
    while True:
        rvecs = np.random.uniform(-1, 1, (3, 3))
        if abs(np.linalg.det(rvecs)) > 0.1:
            break
    cell = Cell(rvecs)

    # Test attributes
    assert cell.nvec == 3
    assert (cell.rvecs == rvecs).all()
    assert abs(cell.volume - abs(np.linalg.det(rvecs))) < 1e-10
    assert abs(np.dot(cell.gvecs, cell.rvecs.transpose()) - np.identity(3)).max() < 1e-5
    assert abs(np.dot(cell.gvecs.transpose(), cell.rvecs) - np.identity(3)).max() < 1e-5
    cell2 = Cell(-cell.rvecs)
    assert abs(cell2.volume - abs(np.linalg.det(rvecs))) < 1e-10
    for i in range(3):  # xrange is Python 2 only
        assert cell.get_rlength(i) == cell.rlengths[i]
        assert cell.get_glength(i) == cell.glengths[i]
        assert cell.get_rspacing(i) == cell.rspacings[i]
        assert cell.get_gspacing(i) == cell.gspacings[i]
        assert abs(cell.get_rlength(i) - 1.0 / cell.get_gspacing(i)) < 1e-10
        assert abs(cell.get_glength(i) - 1.0 / cell.get_rspacing(i)) < 1e-10

    # Test methods (1)
    vec1 = np.array([10.0, 0.0, 5.0]) * angstrom
    cell.mic(vec1)
    cell.add_rvec(vec1, np.array([1, 2, 3]))

    # Test methods (2)
    check_frac_cart(cell)
    check_g_lincomb_dot_rvecs(cell)
def get_bands(h, output_file="BANDS2D_", nindex=[-1, 1],
              nk=50, nsuper=1, reciprocal=False,
              operator=None, k0=[0., 0.]):
    """Calculate band structure"""
    if h.dimensionality != 2:
        raise  # continue if two dimensional
    hk_gen = h.get_hk_gen()  # gets the function to generate h(k)
    kxs = np.linspace(-nsuper, nsuper, nk) + k0[0]  # generate kx
    kys = np.linspace(-nsuper, nsuper, nk) + k0[1]  # generate ky
    kdos = []  # empty list
    kxout = []
    kyout = []
    if reciprocal:
        R = h.geometry.get_k2K()  # get matrix
    else:
        R = np.matrix(np.identity(3))  # get identity
    # setup a reasonable value for delta
    # setup the operator
    if operator is None:
        operator = np.matrix(np.identity(h.intra.shape[0]))
    os.system("rm -f " + output_file + "*")  # delete previous files
    fo = [open(output_file + "_" + str(i) + ".OUT", "w") for i in nindex]  # files
    for x in kxs:
        for y in kys:  # was "for y in kxs": kys was generated but never used
            r = np.matrix([x, y, 0.]).T  # real space vectors
            k = np.array((R * r).T)[0]   # change of basis
            hk = hk_gen(k)               # get hamiltonian
            evals = lg.eigvalsh(hk)      # eigenvalues
            epos = sorted(evals[evals > 0])  # positive energies
            eneg = -np.array(sorted(np.abs(evals[evals < 0])))  # negative energies
            for (i, j) in zip(nindex, range(len(nindex))):  # loop over bands
                fo[j].write(str(x) + "   " + str(y) + "   ")
                if i > 0:
                    fo[j].write(str(epos[i - 1]) + "\n")
                if i < 0:
                    fo[j].write(str(eneg[abs(i) - 1]) + "\n")
    [f.close() for f in fo]  # close files
def identity_like_generalized(a):
    a = asarray(a)
    if a.ndim == 3:
        return np.array([identity(a.shape[-2]) for _ in a])
    elif a.ndim > 3:
        raise ValueError("Not implemented...")
    return identity(a.shape[0])
def test_cell_cubic():
    rvecs = np.array([[9.865, 0.0, 0.0],
                      [0.0, 9.865, 0.0],
                      [0.0, 0.0, 9.865]]) * angstrom
    cell = Cell(rvecs)

    # Test attributes
    assert cell.nvec == 3
    assert (cell.rvecs == rvecs).all()
    assert (cell.rspacings == 9.865 * angstrom).all()
    assert (cell.gspacings == 1 / (9.865 * angstrom)).all()
    assert abs(cell.volume - (9.865 * angstrom) ** 3) < 1e-10
    assert abs(np.dot(cell.gvecs, cell.rvecs.transpose()) - np.identity(3)).max() < 1e-5
    assert abs(np.dot(cell.gvecs.transpose(), cell.rvecs) - np.identity(3)).max() < 1e-5
    cell2 = Cell(-cell.rvecs)
    assert abs(cell2.volume - (9.865 * angstrom) ** 3) < 1e-10
    for i in range(3):  # xrange is Python 2 only
        assert cell.get_rlength(i) == cell.rlengths[i]
        assert cell.get_glength(i) == cell.glengths[i]
        assert cell.get_rspacing(i) == cell.rspacings[i]
        assert cell.get_gspacing(i) == cell.gspacings[i]
        assert abs(cell.get_rlength(i) - 1.0 / cell.get_gspacing(i)) < 1e-10
        assert abs(cell.get_glength(i) - 1.0 / cell.get_rspacing(i)) < 1e-10

    # Test methods (1)
    vec1 = np.array([10.0, 0.0, 5.0]) * angstrom
    cell.mic(vec1)
    assert abs(vec1 - np.array([0.135, 0.0, -4.865]) * angstrom).max() < 1e-10
    cell.add_rvec(vec1, np.array([1, 2, 3]))
    assert abs(vec1 - np.array([10.0, 19.73, 24.73]) * angstrom).max() < 1e-10

    # Test methods (2)
    check_frac_cart(cell)
    check_g_lincomb_dot_rvecs(cell)
def transform(args, workspace_lh, workspace_rh, nsubjs):
    transform_lh = np.zeros((args.nvoxel, args.nfeature, nsubjs))
    transform_rh = np.zeros((args.nvoxel, args.nfeature, nsubjs))
    if args.align_algo in ["ha", "srm_noneprob"]:
        transform_lh = workspace_lh["R"]
        transform_rh = workspace_rh["R"]
    elif args.align_algo in ["srm", "pca", "ica"]:
        bW_lh = workspace_lh["bW"]
        bW_rh = workspace_rh["bW"]
        for m in range(nsubjs):
            transform_lh[:, :, m] = bW_lh[m * args.nvoxel:(m + 1) * args.nvoxel, :]
            transform_rh[:, :, m] = bW_rh[m * args.nvoxel:(m + 1) * args.nvoxel, :]
    elif args.align_algo in ["ha_sm_retraction"]:
        bW_lh = workspace_lh["W"]
        bW_rh = workspace_rh["W"]
        for m in range(nsubjs):
            transform_lh[:, :, m] = bW_lh[:, :, m]
            transform_rh[:, :, m] = bW_rh[:, :, m]
    elif args.align_algo == "noalign":
        for m in range(nsubjs):
            transform_lh[:, :, m] = np.identity(args.nvoxel)
            transform_rh[:, :, m] = np.identity(args.nvoxel)
    else:
        exit("alignment algo not recognized")
    return (transform_lh, transform_rh)
def ale3dStrainOutToV(vecds):
    # takes 5 components of evecd; the 6th component is lndetv
    """convert from vecds representation to symmetry matrix"""
    eps = num.zeros([3, 3], dtype='float64')
    # Akk_by_3 = sqr3i * vecds[5]  # -p
    a = num.exp(vecds[5]) ** (1. / 3.)  # -p
    t1 = sqr2i * vecds[0]
    t2 = sqr6i * vecds[1]
    eps[0, 0] = t1 - t2
    eps[1, 1] = -t1 - t2
    eps[2, 2] = sqr2b3 * vecds[1]
    eps[1, 0] = vecds[2] * sqr2i
    eps[2, 0] = vecds[3] * sqr2i
    eps[2, 1] = vecds[4] * sqr2i
    eps[0, 1] = eps[1, 0]
    eps[0, 2] = eps[2, 0]
    eps[1, 2] = eps[2, 1]

    epstar = eps / a
    V = (num.identity(3) + epstar) * a
    Vinv = (num.identity(3) - epstar) / a
    return V, Vinv
def get_H(ph_count1, at_count1, wc1, wa1, g1,
          ph_count2, at_count2, wc2, wa2, g2, m=0.4, RWA=True):
    # ------------------------------------------------------------------
    # get_H_err(ph_count, at_count, wc, wa, g, RWA)
    # ------------------------------------------------------------------
    adiag1 = np.sqrt(np.arange(1, ph_count1 + 1))
    across1 = np.diagflat(adiag1, -1)
    a1 = np.diagflat(adiag1, 1)
    acrossa1 = np.dot(across1, a1)

    adiag2 = np.sqrt(np.arange(1, ph_count2 + 1))
    across2 = np.diagflat(adiag2, -1)
    a2 = np.diagflat(adiag2, 1)
    acrossa2 = np.dot(across2, a2)
    # ------------------------------------------------------------------
    sigmadiag = [1]
    sigmacross = np.diagflat(sigmadiag, -1)
    sigma = np.diagflat(sigmadiag, 1)
    sigmacrosssigma = np.dot(sigmacross, sigma)
    # ------------------------------------------------------------------
    ph1_dim = ph_count1 + 1
    I_ph1 = np.identity(ph1_dim)
    at1_dim = pow(2, at_count1)
    I_at1 = np.identity(at1_dim)

    ph2_dim = ph_count2 + 1
    I_ph2 = np.identity(ph2_dim)
    at2_dim = pow(2, at_count2)
    I_at2 = np.identity(at2_dim)
    # ------------------------------------------------------------------
    if RWA:
        h1 = H1.get_H_RWA(ph_count1, at_count1, wc1, wa1, g1, RWA)
        h2 = H1.get_H_RWA(ph_count2, at_count2, wc2, wa2, g2, RWA)
    else:
        h1 = H1.get_H_EXACT(ph_count1, at_count1, wc1, wa1, g1)
        h2 = H1.get_H_EXACT(ph_count2, at_count2, wc2, wa2, g2)

    H = np.kron(h1, np.identity(h2.shape[0])) + np.kron(np.identity(h1.shape[0]), h2)

    H1_m = np.kron(across1, np.identity(at1_dim))
    H1_m = np.kron(H1_m, a2)
    H1_m = np.kron(H1_m, np.identity(at2_dim))

    H2_m = np.kron(a1, np.identity(at1_dim))
    H2_m = np.kron(H2_m, across2)
    H2_m = np.kron(H2_m, np.identity(at2_dim))

    H_m = m * (H1_m + H2_m)
    # ------------------------------------------------------------------
    H = np.matrix(H + H_m)
    H_size = np.shape(H)
    # print('H:\n', H, '\n')
    # print('H_size:', H_size, '\n')
    # ------------------------------------------------------------------
    return H
def jacobi_eigenvalue_algorithm(a, n, max_iter):
    def threshold_convergence(a, n, it_num):
        off_sum = 0.0
        for j in range(0, n):
            for i in range(0, j):
                off_sum = off_sum + a[i, j] ** 2
        off_sum = sqrt(off_sum) / float(4 * n)
        if off_sum == 0.0:
            return off_sum
        for p in range(0, n):
            for q in range(p + 1, n):
                gapq = 10.0 * abs(a[p, q])
                termp = gapq + abs(a[p, p])
                termq = gapq + abs(a[q, q])
                if 4 < it_num and termp == abs(a[p, p]) and termq == abs(a[q, q]):
                    a[p, q] = 0.0
        return off_sum

    def max_off_diagonal(a, n):
        # Find the largest off-diagonal element
        max_elem = 0.0
        for i in range(n - 1):
            for j in range(i + 1, n):
                if abs(a[i, j]) >= max_elem:
                    max_elem = abs(a[i, j])
                    k = i
                    l = j
        return max_elem, k, l

    def rotate(a, n, p, k, l):
        # Rotate so as to annihilate a[k, l]
        diff = a[l, l] - a[k, k]
        if abs(a[k, l]) < abs(diff) * 0.1:
            t = a[k, l] / diff
        else:
            theta = diff / (2.0 * a[k, l])
            t = 1.0 / (abs(theta) + sqrt(theta ** 2 + 1.0))
            if theta < 0.0:
                t = -t
        c = 1.0 / sqrt(t ** 2 + 1.0)
        s = t * c
        tau = s / (1.0 + c)
        temp = a[k, l]
        a[k, l] = 0.0
        a[k, k] = a[k, k] - t * temp
        a[l, l] = a[l, l] + t * temp
        for i in range(k):  # i < k
            temp = a[i, k]
            a[i, k] = temp - s * (a[i, l] + tau * temp)
            a[i, l] = a[i, l] + s * (temp - tau * a[i, l])
        for i in range(k + 1, l):  # k < i < l
            temp = a[k, i]
            a[k, i] = temp - s * (a[i, l] + tau * a[k, i])
            a[i, l] = a[i, l] + s * (temp - tau * a[i, l])
        for i in range(l + 1, n):  # i > l
            temp = a[k, i]
            a[k, i] = temp - s * (a[l, i] + tau * temp)
            a[l, i] = a[l, i] + s * (temp - tau * a[l, i])
        for i in range(n):  # update the transformation matrix
            temp = p[i, k]
            p[i, k] = temp - s * (p[i, l] + tau * p[i, k])
            p[i, l] = p[i, l] + s * (temp - tau * p[i, l])

    n = len(a)
    p = identity(n) * 1.0
    for it_num in range(max_iter):
        max_elem, k, l = max_off_diagonal(a, n)
        off_sum = threshold_convergence(a, n, it_num)
        if max_elem < 0.000001 or off_sum == 0.0:
            return diagonal(a), p, it_num
        rotate(a, n, p, k, l)
    print('Method did not converge')
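Hypothetical usage: the routine mutates a in place and returns its diagonal (the eigenvalues), the accumulated rotation matrix, and the iteration count, so the result can be cross-checked against NumPy's symmetric eigensolver.

import numpy as np

A = np.array([[4.0, 1.0, 0.0],
              [1.0, 3.0, 1.0],
              [0.0, 1.0, 2.0]])
eigvals, vecs, it_num = jacobi_eigenvalue_algorithm(A.copy(), 3, 100)
assert np.allclose(np.sort(eigvals),
                   np.sort(np.linalg.eigvalsh(A)))  # order may differ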
        int(np.around(rateL[pos] * x * 10000)) for x in [t0, t1, t2, t3]
    ]
    ancaa = ancseq[pos]
    anc = np.array([1 if ancaa == aaL[i] else 0 for i in range(20)])
    freq = freqL[pos, :]
    mul = freq / jfreq
    mul.shape = 1, 20
    mulmat = mul.repeat(20, axis=0)
    jfmat = jmat * mulmat
    for i in range(20):
        jfmat[i, i] = 1 - (np.sum(jfmat[i, :]) - jfmat[i, i])
    n1aprob = evo(anc, T[0], jfmat)
    n2aprob = evo(anc, T[2], jfmat)
    Id = np.identity(20)
    mat1 = evo(Id, T[1], jfmat)
    mat2 = evo(Id, T[3], jfmat)
    for i in range(20):
        mat1[i, i] = 0
        mat2[i, i] = 0
    prob = 0
    n1aprob.shape = 20, 1
    n2aprob.shape = 20, 1
    condmat1 = np.multiply(n1aprob, mat1)
    condmat2 = np.multiply(n2aprob, mat2)
    condmatsum = np.sum(condmat2, axis=0, keepdims=True)
    prob = np.sum(np.multiply(condmatsum, condmat1))
    probL.append(prob)
    # print >>OUTSITE, "%f" % (prob)
def fprime_ieqcon2(self, x):
    """ Vector inequality constraint, derivative """
    return np.identity(x.shape[0])
# Misc.
import random
import warnings

# ENVIRONMENT SETUP ############################################################
game_name = 'SpaceInvaders-Atari2600'
env = retro.make(game=game_name)

print("***************** {} *****************".format(game_name))
print("Frame size: {}".format(env.observation_space))
print("Action size: {}\n".format(env.action_space.n))

# Here we create a one-hot encoded version of our actions:
# possible_actions = [[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], ...]
possible_actions = np.array(np.identity(env.action_space.n, dtype=int).tolist())

# PRE-PROCESSING FUNCTIONS #####################################################
"""
preprocess_frame:
Take a frame.
Grayscale it.
Resize it.
__________________
|                 |
|                 |
|                 |
|                 |
|_________________|
def get_coordconv_matrix(coord):
    """Return the rotation matrix corresponding to coord systems given
    in coord.

    Usage: matconv, do_conv, normcoord = get_coordconv_matrix(coord)

    Input:
       - coord: a tuple with initial and final coord systems.
         See normalise_coord.
    Output:
       - matconv: the euler matrix for coord sys conversion
       - do_conv: True if matconv is not identity, False otherwise
       - normcoord: the tuple of initial and final coord sys.

    History: adapted from CGIS IDL library.
    """
    coord_norm = normalise_coord(coord)

    if coord_norm[0] == coord_norm[1]:
        matconv = np.identity(3)
        do_conv = False
    else:
        eps = 23.452294 - 0.0130125 - 1.63889E-6 + 5.02778E-7
        eps = eps * np.pi / 180.

        # ecliptic to galactic
        e2g = np.array([[-0.054882486, -0.993821033, -0.096476249],
                        [ 0.494116468, -0.110993846,  0.862281440],
                        [-0.867661702, -0.000346354,  0.497154957]])

        # ecliptic to equatorial
        e2q = np.array([[1.,          0.,               0.],
                        [0., np.cos(eps), -1. * np.sin(eps)],
                        [0., np.sin(eps),      np.cos(eps)]])

        # galactic to ecliptic
        g2e = np.linalg.inv(e2g)

        # galactic to equatorial
        g2q = np.dot(e2q, g2e)

        # equatorial to ecliptic
        q2e = np.linalg.inv(e2q)

        # equatorial to galactic
        q2g = np.dot(e2g, q2e)

        if coord_norm == ('E', 'G'):
            matconv = e2g
        elif coord_norm == ('G', 'E'):
            matconv = g2e
        elif coord_norm == ('E', 'C'):
            matconv = e2q
        elif coord_norm == ('C', 'E'):
            matconv = q2e
        elif coord_norm == ('C', 'G'):
            matconv = q2g
        elif coord_norm == ('G', 'C'):
            matconv = g2q
        else:
            raise ValueError('Wrong coord transform :', coord_norm)
        do_conv = True

    return matconv, do_conv, coord_norm
def isRotationMatrix(R):
    Rt = np.transpose(R)
    shouldBeIdentity = np.dot(Rt, R)
    I = np.identity(3, dtype=R.dtype)
    n = np.linalg.norm(I - shouldBeIdentity)
    return n < 1e-6
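For illustration, two hedged checks: the identity passes, while a uniformly scaled matrix fails because its columns are no longer unit length.

assert isRotationMatrix(np.identity(3))
assert not isRotationMatrix(2.0 * np.identity(3))  # scaling breaks orthonormality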
def bayesian1(name):
    data1 = getcurrent(name.lower())
    data = []
    a = 390 - len(data)
    for i in range(0, len(data1)):
        data.append(data1[i][1])
    #print data
    x_10 = []
    for b in range(0, a):  # xrange is Python 2 only
        t_data = []
        for i in range(len(data) - 10, len(data)):
            t_data.append(data[i])
        for i in range(1, 11):
            x_10.append(i)
        t = []
        t.append(t_data)
        t_data = t
        #print t_data
        N = 10
        M = 6
        rel_err_dr = 0
        x = x_10[len(x_10) - 1] + 1
        for k in range(1):
            t = numpy.zeros((N, 1), float)
            phi = numpy.zeros((M, 1), float)
            phi_sum = numpy.zeros((M, 1), float)
            phi_sum_t = numpy.zeros((M, 1), float)
            for i in range(M):
                phi[i][0] = math.pow(x, i)
            for i in range(N):
                t[i][0] = t_data[k][i]
            for j in range(N):
                for i in range(M):
                    phi_sum[i][0] = phi_sum[i][0] + math.pow(x_10[j], i)
                    phi_sum_t[i][0] = phi_sum_t[i][0] + t[j][0] * math.pow(x_10[j], i)
            # Calculation of variance / standard deviation
            S = numpy.linalg.inv(0.005 * numpy.identity(M)
                                 + 11.1 * numpy.dot(phi_sum, phi.T))
            var = numpy.dot(phi.T, numpy.dot(S, phi))
            var = var + 1 / 11.1
            # Calculating the mean
            mean = 11.1 * numpy.dot(phi.T, numpy.dot(S, phi_sum_t))
            #error_n = 0
            #error_n = error_n + math.fabs(t_actual[k] - mean)
            #abs_error = 0
            #abs_error = abs_error + error_n
            mean = mean[0][0]
            #print 'mean', mean
            data.append(mean)
        t = t_data[0]
        t_data = t
    sum = 0
    avg = 0
    for i in t_data:
        sum += i
    mov = sum / len(t_data)
    #print 'mov', mov
    per = ((mean - mov) / mov) * 100
    #print 'per', per
    final = []
    mean = round(mean, 3)
    per = round(per, 3)
    final.append(mean)
    final.append(per)
    return final[0]
#!/usr/bin/env python
import numpy as np

zero = np.zeros([2, 2])
identity = np.identity(2)

metric = np.array([
    [1,  0,  0,  0],
    [0, -1,  0,  0],
    [0,  0, -1,  0],
    [0,  0,  0, -1]])

sigma_1 = np.array([[0, 1], [1, 0]])
sigma_2 = np.array([[0, -1j], [1j, 0]])
sigma_3 = np.array([[1, 0], [0, -1]])
sigma = [sigma_1, sigma_2, sigma_3]

gamma_0 = np.array([
    [1, 0,  0,  0],
    [0, 1,  0,  0],
    [0, 0, -1,  0],
    [0, 0,  0, -1]], dtype=np.complex128)
gamma_1 = np.concatenate([np.concatenate([zero, -sigma_1]),
                          np.concatenate([sigma_1, zero])], axis=1)
gamma_2 = np.concatenate([np.concatenate([zero, -sigma_2]),
                          np.concatenate([sigma_2, zero])], axis=1)
gamma_3 = np.concatenate([np.concatenate([zero, -sigma_3]),
                          np.concatenate([sigma_3, zero])], axis=1)

gammaM_0 = np.concatenate([np.concatenate([zero, sigma_2]),
                           np.concatenate([sigma_2, zero])], axis=1)
gammaM_1 = np.concatenate([np.concatenate([1j * sigma_3, zero]),
                           np.concatenate([zero, 1j * sigma_3])], axis=1)
gammaM_2 = np.concatenate([np.concatenate([zero, sigma_2]),
                           np.concatenate([-sigma_2, zero])], axis=1)
gammaM_3 = np.concatenate([np.concatenate([-1j * sigma_1, zero]),
                           np.concatenate([zero, -1j * sigma_1])], axis=1)
def stage7(self, guests):
    """
    Arrange guest atoms.
    """
    self.logger.info("Stage7: Atomic positions of the guest.")
    if self.cagepos is not None:
        # the cages around the dopants
        dopants_neighbors = self.dopants_info(self.dopants, self.reppositions,
                                              self.repcagepos, self.repcell)
        # put the (one-off) groups
        if len(self.spot_groups) > 0:
            # process the -H option
            for cage, group_to in self.spot_groups.items():
                group, root = group_to.split(":")
                self.add_group(cage, group, int(root))
        molecules = defaultdict(list)
        if len(self.spot_guests) > 0:
            # process the -G option
            for cage, molec in self.spot_guests.items():
                molecules[molec].append(cage)
                self.filled_cages.add(cage)
        if guests is not None:
            # process the -g option
            for arg in guests:
                cagetype, spec = arg[0].split("=")
                assert cagetype in self.cagetypes, \
                    "Nonexistent cage type: {0}".format(cagetype)
                resident = dict()
                rooms = list(self.cagetypes[cagetype] - self.filled_cages)
                for room in rooms:
                    resident[room] = None
                # spec contains a formula consisting of "+" and "*"
                contents = spec.split("+")
                vacant = len(rooms)
                for content in contents:
                    if "*" in content:
                        molec, frac = content.split("*")
                        frac = float(frac)
                    else:
                        molec = content
                        frac = 1.0
                    nmolec = int(frac * len(rooms) + 0.5)
                    vacant -= nmolec
                    assert vacant >= 0, "Too many guests."
                    remain = nmolec
                    movedin = []
                    while remain > 0:
                        r = random.randint(0, len(rooms) - 1)
                        room = rooms[r]
                        if resident[room] is None:
                            resident[room] = molec
                            molecules[molec].append(room)
                            movedin.append(room)
                            remain -= 1
                    #self.logger.info(
                    #    "  {0} * {1} @ {2}".format(molec, nmolec, movedin))
        # Now we've got the address book of the molecules.
        if len(molecules):
            self.logger.info("  Summary of guest placements:")
            self.guests_info(self.cagetypes, molecules)
        if len(self.spot_groups) > 0:
            self.logger.info("  Summary of groups:")
            self.groups_info(self.groups)
        # semi-guests
        for root, cages in self.groups.items():
            assert root in self.dopants
            name = self.dopants[root]
            molname = "G{0}".format(root)
            pos = self.reppositions[root]
            rot = self.rotmatrices[root]
            self.atoms.append([0, molname, name, self.repcell.rel2abs(pos), 0])
            del self.dopants[root]  # processed
            self.logger.debug((root, cages, name, molname, pos, rot))
            for cage, group in cages.items():
                assert group in self.groups_placer
                assert cage in dopants_neighbors[root]
                cpos = self.repcagepos[cage]
                self.atoms += self.groups_placer[group](cpos, pos,
                                                        self.repcell, molname)
        # molecular guests
        for molec, cages in molecules.items():
            gmol = safe_import("molecule", molec)
            cpos = [self.repcagepos[i] for i in cages]
            cmat = [np.identity(3) for i in cages]
            self.atoms += arrange_atoms(cpos, self.repcell, cmat,
                                        gmol.sites, gmol.labels, gmol.name)
    # Assume the dopant is monatomic and replaces one water molecule
    atomset = defaultdict(set)
    for label, name in self.dopants.items():
        atomset[name].add(label)
    for name, labels in atomset.items():
        pos = [self.reppositions[i] for i in sorted(labels)]
        rot = [self.rotmatrices[i] for i in sorted(labels)]
        self.atoms += arrange_atoms(pos, self.repcell, rot,
                                    [[0., 0., 0.], ], [name], name)
    self.logger.info("Stage7: end.")
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np

# np.identity builds an identity matrix; its rows act as one-hot vectors.
embedding = np.identity(5, dtype=np.int32)
ids = tf.placeholder(tf.int32, [None])
result = tf.nn.embedding_lookup(embedding, ids)

# In an RNN, the embedding spreads out each token so it can be fed to the cell.
with tf.Session() as sess:
    # With feed_dict={ids: [1, 2, 1]}, rows 2, 3 and 2 of the embedding are
    # returned. embedding_lookup supports several partitioning strategies,
    # all broadly similar; feed_dict can also be multi-dimensional.
    print(sess.run(result, feed_dict={ids: [1, 2, 1]}))

'''
TODO: Callback
'''
def test_linear_sum_assignment_input_string():
    I = np.identity(3)
    with pytest.raises(TypeError, match="Cannot cast array data"):
        linear_sum_assignment(I.astype(str))
ave_dist_tgt = np.linalg.norm(np.mean(verts_tgt_visi, axis=0))
scale = ave_dist_gt / ave_dist_tgt
verts_tgt *= scale
verts_tgt_visi *= scale

# do ICP
source = open3d.PointCloud()
target = open3d.PointCloud()
source.points = open3d.Vector3dVector(verts_tgt_visi)
target.points = open3d.Vector3dVector(verts_gt_visi)
reg_p2p = open3d.registration_icp(
    source,
    target,
    10,
    np.identity(4),
    open3d.TransformationEstimationPointToPoint(),
    open3d.ICPConvergenceCriteria(max_iteration=10000),
)
trans_mat = reg_p2p.transformation

verts_tgt_fit = np.zeros(verts_tgt.shape)
verts_tgt_visi_fit = np.zeros(verts_tgt_visi.shape)
for j in range(len(verts_tgt)):
    verts_tgt_fit[j] = np.dot(trans_mat[:3, :3], verts_tgt[j]) + trans_mat[:3, 3]
for j in range(len(verts_tgt_visi)):
    verts_tgt_visi_fit[j] = np.dot(trans_mat[:3, :3], verts_tgt_visi[j]) + trans_mat[:3, 3]
def get_dK_y_dnoise(self) -> np.ndarray:
    return 2 * self.noise_scale * np.identity(self.X.shape[0])
def test_linear_sum_assignment_input_inf():
    I = np.identity(3)
    I[:, 0] = np.inf
    with pytest.raises(ValueError, match="cost matrix is infeasible"):
        linear_sum_assignment(I)
    ASC.set_values('sigma', 2)
    ASC.set_values('kernel_type', 'Gaussian Kernel')

    ASC.run()
    #np.savetxt('Four_gaussian_label_1.csv', db['Y_matrix'], delimiter=',', fmt='%d')
    a = db['allocation']
    print('Original allocation :', a)
    b = np.concatenate((np.zeros(200), np.ones(200)))
    print("NMI : ", normalized_mutual_info_score(a, b))
else:
    # Predefining the original clustering; the following are the required settings
    ASC.set_values('q', 1)
    ASC.set_values('C_num', 2)
    ASC.set_values('sigma', 4)
    ASC.set_values('kernel_type', 'Gaussian Kernel')
    ASC.set_values('W_matrix', np.identity(db['d']))

    db['Y_matrix'] = label_2
    db['U_matrix'] = label_2
    db['prev_clust'] = 1
    db['allocation'] = Y_2_allocation(label_2)
    a = db['allocation']
    b = np.concatenate((np.zeros(200), np.ones(200)))

    print('Predefined allocation :', a, '\n')

if True:  # Alternative clustering
    ASC.set_values('sigma', 1)
    start_time = time.time()
    ASC.run()
def test_linear_sum_assignment_input_bool():
    I = np.identity(3)
    assert_array_equal(linear_sum_assignment(I.astype(np.bool_)),
                       linear_sum_assignment(I))
def set_corrected_dofs_helper_fixedy(self, node, transform):
    transform_new = dc(transform)
    node.corrected_transform = transform_new
    # node.corrected_position = node.corrected_transform.dot(vec3tovec4(node.offset))
    transform_new = dc(node.corrected_transform.dot(translation(node.offset)))
    if node.name == "End Site":
        node.corrected_dof_values = []
        return
    if node.depth == 0:
        # if(True):
        inter_transform = np.identity(4)
        for i in range(len(node.dofs)):
            dof = node.dofs[i]
            dof_val = node.dof_values[i]
            rot = np.identity(4)
            if dof == "Xposition":
                rot = translation((dof_val, 0, 0))
            elif dof == "Yposition":
                rot = translation((0, dof_val, 0))
            elif dof == "Zposition":
                rot = translation((0, 0, dof_val))
            elif dof == "Xrotation":
                rot = rotation_x(dof_val)
            elif dof == "Yrotation":
                rot = rotation_y(dof_val)
            elif dof == "Zrotation":
                rot = rotation_z(dof_val)
            inter_transform = inter_transform.dot(rot)
            # transform_new = transform_new.dot(rot)
        transform_new = transform_new.dot(inter_transform)
        node.corrected_dof_values = node.dof_values[:]
        node.noised_position = node.position[:3]
        node.corrected_position = node.noised_position[:]
        for child in node.children:
            child.noised_position = child.position[:3]
            child.corrected_position = child.noised_position[:]
        # print(inter_transform)
        # transform_new2 = transform_new
        # print("inter_transform", inter_transform)
    # if(True):
    else:
        TA = np.empty((len(node.children), 3))
        i = 0
        for child in node.children:
            vec = vec4tovec3(
                np.linalg.inv(transform_new).dot(
                    vec3tovec4(child.corrected_position)))
            vec = vec / np.linalg.norm(vec)
            rot = np.array(child.offset)
            rot = rot / np.linalg.norm(rot)
            ta, ret_val = find_transform(rot, vec, node.dof_values[-1],
                                         node.dof_values)
            if ret_val == -1:
                child.noised_position = child.position[:3]
                diff = np.array(child.noised_position) - np.array(
                    node.corrected_position)
                lengthratio = norm2(diff) / norm2(child.offset)
                diff = diff / lengthratio
                diff = diff + np.array(node.corrected_position)
                child.corrected_position = diff.tolist()
            TA[i, :] = np.array([ta[0], ta[1], ta[2]])
            i += 1
        ta = np.mean(TA, axis=0)
        trans = rotation_z(ta[0]).dot(rotation_x(ta[1])).dot(rotation_y(ta[2]))
        transform_new = transform_new.dot(trans)
        node.corrected_dof_values = [ta[0], ta[1], ta[2]]
    for child in node.children:
        self.set_corrected_dofs_helper(child, dc(transform_new))
# divide them all by the pitcher_outs_recorded column
y = y / y[:, 0:1] * 27
# get rid of pitcher_outs_recorded since they're all the same now
y = y[:, 1:]

# standardize the output fields
y = (y - np.mean(y, axis=0, keepdims=True)) / np.std(y, axis=0, keepdims=True)

# get the X values
X = season_matrix[:, 0:4]
X, encodings = one_hot_columns(X, [0, 1, 2])

# add a row to the matrix for each feature to regularize it
regularizer = np.identity(X.shape[1]) * 16
for i in range(len(encodings[0]) + len(encodings[1]), regularizer.shape[0] - 1):
    regularizer[i, i] = 1

# if there are no DHs this season, add a regularizer to the DH column
# so the matrix isn't singular
if np.mean(X[:, -1]) == 0:
    regularizer[-1, -1] = 1
else:
    regularizer[-1, -1] = 0

X = np.vstack([X, regularizer])
y = np.vstack([y, np.zeros((X.shape[1], y.shape[1]))])

XTX = X.T.dot(X)
XTy = X.T.dot(y)
w = la.solve(XTX, XTy)
def set_corrected_dofs(self, noise_std):
    transform = np.identity(4)
    self.set_corrected_dofs_helper(self.root, transform)
def translation(xyz):
    tran = np.identity(4)
    tran[0, 3] = xyz[0]
    tran[1, 3] = xyz[1]
    tran[2, 3] = xyz[2]
    return tran
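A small usage sketch: composing a translation with its inverse offset recovers the identity, and the homogeneous origin maps to the offset itself.

T = translation((1.0, 2.0, 3.0))
assert np.allclose(T @ translation((-1.0, -2.0, -3.0)), np.identity(4))
assert np.allclose(T @ np.array([0.0, 0.0, 0.0, 1.0]), [1.0, 2.0, 3.0, 1.0])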
def generate_full_rank_matrix(input_matrix):
    '''Do Gaussian elimination and return the decomposed matrices.
    input_matrix: (matrix)
    return (link_matrix, kernel_matrix, independent_list)
    '''
    m, n = input_matrix.shape
    reduced_matrix = input_matrix.copy()
    # numpy.float is a removed alias for the builtin float; use numpy.float64
    pivots = numpy.identity(m, dtype=numpy.float64)
    dependent_list = list(range(m))
    independent_list = []
    skipped_list = []
    skipped_buffer = []
    for j in range(n):
        if len(dependent_list) == 0:
            break
        maxidx = dependent_list[0]
        maxelem = reduced_matrix[maxidx][j]
        for i in dependent_list:
            if abs(reduced_matrix[i][j]) > abs(maxelem):
                maxidx = i
                maxelem = reduced_matrix[i][j]
        if maxelem != 0:
            reduced_matrix[maxidx] /= maxelem
            pivots[maxidx] /= maxelem
            for i in range(m):
                if i != maxidx:
                    k = reduced_matrix[i][j]
                    reduced_matrix[i] -= k * reduced_matrix[maxidx]
                    pivots[i] -= k * pivots[maxidx]
            if len(skipped_buffer) > 0:
                skipped_list.extend(skipped_buffer)
                skipped_buffer = []
            dependent_list.remove(maxidx)
            independent_list.append(maxidx)
        else:
            skipped_buffer.append(j)
    assert len(dependent_list) + len(independent_list) == m

    link_matrix = numpy.identity(m, dtype=numpy.float64)
    link_matrix[dependent_list] -= pivots[dependent_list]

    rank = len(independent_list)
    kernel_matrix = numpy.zeros((n, n - rank), dtype=numpy.float64)
    parsed_rank = rank + len(skipped_list)
    reduced_matrix = numpy.take(
        reduced_matrix, list(range(parsed_rank, n)) + skipped_list, 1)
    cnt1 = 0
    cnt2 = 0
    for i in range(parsed_rank):
        if len(skipped_list) > cnt1 and skipped_list[cnt1] == i:
            kernel_matrix[i][n - parsed_rank + cnt1] = 1.0
            cnt1 += 1
        else:
            kernel_matrix[i][range(n - rank)] = -reduced_matrix[independent_list[cnt2]]
            cnt2 += 1
    for i in range(n - parsed_rank):
        kernel_matrix[i + parsed_rank][i] = 1.0

    independent_list = numpy.sort(independent_list)
    link_matrix = numpy.take(link_matrix, independent_list, 1)
    return (link_matrix, kernel_matrix, independent_list)
def set_corrected_dofs_helper(self, node, transform):
    transform_new = dc(transform)
    node.corrected_transform = transform_new
    # node.corrected_position = node.corrected_transform.dot(vec3tovec4(node.offset))
    transform_new = dc(node.corrected_transform.dot(translation(node.offset)))
    if node.name == "End Site" or not node.isreal:
        node.corrected_dof_values = []
        return
    if node.depth == 0:
        # if(True):
        inter_transform = np.identity(4)
        for i in range(len(node.dofs)):
            dof = node.dofs[i]
            dof_val = node.dof_values[i]
            rot = np.identity(4)
            if dof == "Xposition":
                rot = translation((dof_val, 0, 0))
            elif dof == "Yposition":
                rot = translation((0, dof_val, 0))
            elif dof == "Zposition":
                rot = translation((0, 0, dof_val))
            elif dof == "Xrotation":
                rot = rotation_x(dof_val)
            elif dof == "Yrotation":
                rot = rotation_y(dof_val)
            elif dof == "Zrotation":
                rot = rotation_z(dof_val)
            inter_transform = inter_transform.dot(rot)
            # transform_new = transform_new.dot(rot)
        transform_new = transform_new.dot(inter_transform)
        node.corrected_dof_values = node.dof_values[:]
        node.noised_position = node.position[:3]
        node.corrected_position = node.noised_position[:]
        for child in node.children:
            child.noised_position = child.position[:3]
            child.corrected_position = child.noised_position[:]
    else:
        TA = np.empty((len(node.children), 3))
        i = 0
        finapos = []
        initpos = []
        for child in node.children:
            vec = vec4tovec3(
                np.linalg.inv(transform_new).dot(
                    vec3tovec4(child.corrected_position)))
            vec = vec / np.linalg.norm(vec)
            rot = np.array(child.offset)
            rot = rot / np.linalg.norm(rot)
            finapos.append(vec)
            initpos.append(rot)
        ta = self.find_rotation(initpos, finapos)
        # print(ta)
        trans = rotation_z(ta[0]).dot(rotation_x(ta[1])).dot(rotation_y(ta[2]))
        # print(trans)
        transform_new = transform_new.dot(trans)
        node.corrected_dof_values = [ta[0], ta[1], ta[2]]
    for child in node.children:
        self.set_corrected_dofs_helper(child, dc(transform_new))
def pivot_matrix(A):
    '''
    Takes a matrix 'A' and performs the pivoting, returning the pivoted
    matrix together with the permutation matrix.
    '''
    n = A.shape[0]
    m = A.shape[1]
    aux = A.copy()
    permutation = np.identity(n)
    if n == m or m > n:
        for j in range(m):  # walk the matrix column by column
            if j == n:
                break
            # index of the maximum value in the column
            index = np.argmax(aux[:, j])
            if index < j:
                # check whether the index of the maximum lies in the submatrix
                temporary = np.max(aux[j:n, j])
                # find the true index
                index = np.argwhere(aux[:, j] == temporary).flatten()
                if index.size > 1:
                    # if several indices match, take the first one
                    index = int(index[0].copy())
                else:
                    index = int(np.argwhere(aux[:, j] == temporary).flatten())
            if index >= j:  # the index lies in the submatrix
                temp = aux[j, :].copy()           # row to be displaced
                aux[j, :] = aux[index, :].copy()  # row that moves up
                aux[index, :] = temp.copy()       # move the row down
                temp2 = permutation[j, :].copy()  # permutation matrix
                permutation[j, :] = permutation[index, :]
                permutation[index, :] = temp2.copy()
    else:
        # this branch handles tall matrices with n > m
        for j in range(n):
            if j == m:
                break
            # index of the maximum value in the column
            index = np.argmax(aux[:, j])
            #print(index)
            if index < j:
                # check whether the index of the maximum lies in the submatrix
                temporary = np.max(aux[j:n, j])
                # find the true index
                index = np.argwhere(aux[:, j] == temporary).flatten()
                if index.size > 1:
                    index = int(index[0].copy())
                else:
                    index = int(np.argwhere(aux[:, j] == temporary).flatten())
            if index >= j:
                temp = aux[j, :].copy()
                aux[j, :] = aux[index, :].copy()
                aux[index, :] = temp.copy()
                temp2 = permutation[j, :].copy()
                permutation[j, :] = permutation[index, :]
                permutation[index, :] = temp2.copy()
    return aux, permutation
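Since permutation starts as the identity and receives exactly the row swaps applied to aux, the returned pair should satisfy permutation @ A == aux; a hedged check:

A = np.array([[0.0, 2.0],
              [3.0, 1.0]])
aux, P = pivot_matrix(A)
assert np.allclose(P @ A, aux)  # P records the row swaps applied to A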
def set_transformations_values(self):
    transform = np.identity(4)
    self.set_transformations_values_helper(self.root, transform)
def matrix(self):
    m = np.identity(4)
    m[:3, :3] = self.R
    m[:3, 3] = self.t
    return m
def gradient(x, y, lamda):
    y = np.asmatrix(y)
    lamdaI = lamda * np.identity(x.shape[1])
    inverse = np.linalg.inv(np.dot(x.transpose(), x) + lamdaI)
    return np.dot(np.dot(inverse, x.transpose()), y.transpose())
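Despite its name, this computes the closed-form ridge-regression solution w = (X^T X + lambda I)^-1 X^T y rather than a gradient. A hedged usage sketch; as lamda shrinks toward zero the result approaches ordinary least squares:

X = np.array([[1.0, 0.0],
              [0.0, 1.0],
              [1.0, 1.0]])
y = np.array([1.0, 2.0, 3.0])
w = gradient(X, y, 0.1)                       # shape (2, 1), shrunk toward zero
w_ols = np.linalg.lstsq(X, y, rcond=None)[0]  # the lamda -> 0 limit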
a_s = 1000  # soil-updating parameter (maybe reduce this value? a_s)
b_s = 0.1
c_s = 1
#theta_s =
rho = 0.9
rho_n = 0.1    # local soil-updating parameter
rho_IWD = 0.1  # global soil-updating parameter
epsilon_s = 0.0001   # to prevent zero division
weight_infogain = 1  # for weighted gene ranking used in computing time(i,j)

# Initialization of dynamic parameters
soilMatrix = np.identity(nodes)
soilMatrix.fill(1000)
for a in range(nodes):
    # Precaution: increase the soil from a node to itself so there is little
    # probability of it picking itself
    soilMatrix[a][a] = sys.maxsize
probMatrix = np.identity(nodes)
probMatrix.fill(0)
initialVelocity = 100  # initial velocity of each IWD
velocity = initialVelocity
unvisited = list(range(nodes))  # list of cities not visited; initially this
                                # contains even the initial node
import sys
import numpy as np
import navio.util
import navio.mpu9250
import navio.adc
import serial
import threading
import VL53L0X

del_t = 0.01  # sec
g = 0         # m/s^2
x_old = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=float)  # m, m/s, rad
omega = np.array([0, 0, 0], dtype=float)   # rad/sec
acc = np.array([0, 0, -g], dtype=float)    # m/s^2

gamma = 800  # 0~1000
P_old = np.identity(9) * gamma
I_9 = np.identity(9)
acc_noise = 0.001
gyro_noise = 0.0003468268
QQ = np.diag([0, 0, 0, acc_noise, acc_noise, acc_noise,
              gyro_noise, gyro_noise, gyro_noise])
RR = np.diag([0.4, 0.4, 0.4, 0.02, 0.02, 0.02, 0.02, 0.01])

alfa = np.array([0.8244, 0.8244, 0.8244], dtype=float)
m9a_low_old = np.array([0, 0, 0], dtype=float)
m9g_low_old = np.array([0, 0, 0], dtype=float)

anchor1 = np.array([-6.000,  3.500, 2.5], dtype=float)
anchor2 = np.array([ 8.695,  3.514, 2.5], dtype=float)
anchor3 = np.array([ 8.436, -8.166, 2.5], dtype=float)
anchor4 = np.array([-6.003, -8.160, 2.5], dtype=float)
def main():
    vec, lam = eigenvalue_inv(A, x, n, 10 ** (-8))
    best_vec, best_lam = eigenvalue_inv(A + 4 * np.identity(len(A)), vec, n, 10 ** (-8))
    print("Lambda is %s" % (lam))
def read_cortex_surface_segmentation(fsdir, physical_to_index, fsconfig,
                                     affine=None):
    """ Read the cortex gyri surface segmentation of FreeSurfer.

    Gives access to the right and left hemisphere segmentations that can be
    projected on the cortical and inflated cortical surfaces. The vertices
    are expressed in voxel coordinates.

    Parameters
    ----------
    fsdir: str (mandatory)
        the subject freesurfer segmentation directory.
    physical_to_index: array (mandatory)
        the transformation to project a physical point in an array.
    fsconfig: str (mandatory)
        the freesurfer configuration file.
    affine: array (optional, default None)
        an affine transformation in voxel coordinates that will be applied
        on the output vertices of the cortex surface.

    Returns
    -------
    segmentation: dict
        contains the two hemisphere 'lh' and 'rh' triangular surfaces and
        inflated surfaces represented in a TriSurface structure.
    """
    # Construct the path to the surface segmentation results and associated
    # labels
    meshdir = os.path.join(fsdir, "surf")
    labeldir = os.path.join(fsdir, "label")
    segfile = os.path.join(fsdir, "mri")

    # Get deformation between the ras and ras-tkregister spaces
    asegfile = os.path.join(segfile, "aseg.mgz")
    translation = tkregister_translation(asegfile, fsconfig)

    # Construct the deformation to apply on the cortex mesh
    if affine is None:
        affine = numpy.identity(4)
    deformation = numpy.dot(affine, numpy.dot(physical_to_index, translation))

    # Create a dictionary to contain all the surfaces and labels
    segmentation = {}

    # Select the hemisphere
    for hemi in ["lh", "rh"]:

        # Get annotation id at each vertex (if a vertex does not belong
        # to any label and orig_ids=False, its id will be set to -1) and
        # the names of the labels
        annotfile = os.path.join(labeldir, "{0}.aparc.annot".format(hemi))
        labels, ctab, regions = freesurfer.read_annot(annotfile, orig_ids=False)
        meta = dict((index, {"region": item[0], "color": item[1][:4].tolist()})
                    for index, item in enumerate(zip(regions, ctab)))

        # Select the surface type
        hemisegmentation = {}
        for surf in ["white", "inflated"]:

            # Load the mesh: a 2-uplet with vertex (x, y, z) coordinates and
            # mesh triangles
            meshfile = os.path.join(meshdir, "{0}.{1}".format(hemi, surf))
            mesh = freesurfer.read_geometry(meshfile)
            hemisegmentation[surf] = {
                "vertices": apply_affine_on_mesh(mesh[0], deformation),
                "triangles": mesh[1]
            }

        # Save the segmentation result
        segmentation[hemi] = TriSurface(
            vertices=hemisegmentation["white"]["vertices"],
            inflated_vertices=hemisegmentation["inflated"]["vertices"],
            triangles=hemisegmentation["white"]["triangles"],
            labels=labels,
            metadata=meta)

    return segmentation