def SymmetricalMatch(inj1, inj2):
    # Try to find a transformation T (combining rotations and symmetries)
    # that minimizes the L2 norm between inj1 and T(inj2).
    bestinj2 = inj2
    bestnorm = numpy.linalg.norm(inj1 - bestinj2, 2)
    newinj2 = inj2
    for mirrortype in range(0, 4):
        newinj2 = Mirror(newinj2, mirrortype)
        for i in range(0, 4):
            newinj2 = Rotation(newinj2)
            newnorm = numpy.linalg.norm(inj1 - newinj2, 2)
            if newnorm < bestnorm:
                bestnorm = newnorm
                bestinj2 = newinj2
    return bestinj2
def gauss_seidel(A, b, x_0, omega, tol, n_max):
    # SOR / Gauss-Seidel iteration (omega = 1 gives plain Gauss-Seidel).
    k = 0
    n = np.size(A, 1)
    x = np.zeros_like(x_0)
    r = b - np.dot(A, x_0)
    err = np.linalg.norm(r)
    r_0 = np.linalg.norm(r)
    x_anterior = x_0
    while (err > tol) and (k < n_max):
        k = k + 1
        for i in range(0, n):
            s = 0
            for j in range(0, i):
                s = s + A[i, j] * x[j]
            for j in range(i + 1, n):
                s = s + A[i, j] * x_anterior[j]
            x[i] = omega * (b[i] - s) / A[i, i] + (1 - omega) * x_anterior[i]
        r = b - np.dot(A, x)
        err = np.linalg.norm(r) / r_0
        x_anterior = x.copy()
    return x
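# A minimal usage sketch (not from the original source) for the gauss_seidel
# routine above; the matrix, right-hand side, and tolerances are illustrative.
import numpy as np

A_demo = np.array([[4.0, 1.0], [2.0, 5.0]])  # diagonally dominant, so the sweep converges
b_demo = np.array([1.0, 2.0])
x_demo = gauss_seidel(A_demo, b_demo, np.zeros(2), omega=1.0, tol=1e-8, n_max=100)
# omega = 1.0 reduces SOR to plain Gauss-Seidel; check np.linalg.norm(A_demo @ x_demo - b_demo)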
def is_normalized(self):
    """Check if the dual quaternion is normalized"""
    if np.isclose(np.linalg.norm(self.q_r), 0):
        return True
    rot_normalized = np.isclose(np.linalg.norm(self.q_r), 1)
    trans_normalized = np.allclose(self.q_d / np.linalg.norm(self.q_r), self.q_d)
    return rot_normalized and trans_normalized
def check(self, fore, back):
    id = self.slice[0]
    template = self.template[:fore.shape[0], :fore.shape[1], :]
    fore_warp = torch.round(template + fore).long()
    fore_warp[:, :, 0] = torch.clamp(fore_warp[:, :, 0], min=0, max=fore.shape[0] - 1)
    fore_warp[:, :, 1] = torch.clamp(fore_warp[:, :, 1], min=0, max=fore.shape[1] - 1)
    fore_result = torch.where(
        torch.norm(back[fore_warp[:, :, 0], fore_warp[:, :, 1]] + fore, 2, dim=2) <= 1,
        self.one, self.zero)
    back_warp = torch.round(template + back).long()
    back_warp[:, :, 0] = torch.clamp(back_warp[:, :, 0], min=0, max=fore.shape[0] - 1)
    back_warp[:, :, 1] = torch.clamp(back_warp[:, :, 1], min=0, max=fore.shape[1] - 1)
    back_result = torch.where(
        torch.norm(fore[back_warp[:, :, 0], back_warp[:, :, 1]] + back, 2, dim=2) <= 1,
        self.one, self.zero)
    fore *= fore_result.unsqueeze(-1)
    back *= back_result.unsqueeze(-1)
    fore_warp = torch.round(template + fore).long()
    fore_warp[:, :, 0] = torch.clamp(fore_warp[:, :, 0], min=0, max=fore.shape[0] - 1)
    fore_warp[:, :, 1] = torch.clamp(fore_warp[:, :, 1], min=0, max=fore.shape[1] - 1)
    fore_result = torch.where(
        torch.norm(back[fore_warp[:, :, 0], fore_warp[:, :, 1]] + fore, 2, dim=2) <= 1,
        self.one, self.zero)
    return fore_result, fore_warp
def calculate_score(e_cont, e_resp, e_propcont, e_propresp):
    # cosine similarity between the context embeddings
    context_term = np.dot(e_cont, e_propcont) / (
        np.linalg.norm(e_cont) * np.linalg.norm(e_propcont))
    # cosine similarity between the response embeddings
    response_term = np.dot(e_resp, e_propresp) / (
        np.linalg.norm(e_resp) * np.linalg.norm(e_propresp))
    return context_term + 0.2 * response_term  # TODO: come up with better formula
def angle(x0, x1, x2):
    """ Return the angle at x1 formed by the points x0, x1, x2. """
    assert x1.shape == x2.shape == (2, )
    a, b = x1 - x0, x1 - x2
    # axis=-1 so that x0 may be a single point or an array of points
    return np.arccos(np.dot(a, b) /
                     (np.linalg.norm(a, axis=-1) * np.linalg.norm(b)))
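# A small usage sketch (illustrative, not from the original source): the angle
# at the corner (1, 0) of the right triangle (0,0)-(1,0)-(1,1) is 90 degrees.
import numpy as np

corner = angle(np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([1.0, 1.0]))
# corner is approximately np.pi / 2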
def test_goal(vertical_lines, horizontal_lines, screen_size, it, frame, fs):
    distance1 = 45
    distance2 = 90
    flag = True
    if len(horizontal_lines) > 0:
        for i, v in enumerate(vertical_lines):
            # NOTE: the original distance expression was garbled; this follows the
            # usual point-to-line form |cross(p1 - p0, p0)| / |p1 - p0|.
            d1 = np.linalg.norm(np.cross(v[1] - v[0], v[0])) / np.linalg.norm(v[1] - v[0])
            error = 0.1
            if distance1 * error < d1 < distance1 * (error + 1):
                for j, h in enumerate(horizontal_lines):
                    d2 = np.linalg.norm(np.cross(h[1] - h[0], h[0])) / np.linalg.norm(h[1] - h[0])
                    error = 0.1
                    if distance2 * error < d2 < distance2 * (error + 1):
                        dist = distance(d1, d2)
                        flag = is_between(d1, d2, d2)
                        try:
                            if screen_size[it]:
                                fs[0], fs[1] = it, screen_size[it]
                        except (IndexError, KeyError):
                            pass
                        if fs[0] < it < fs[1]:
                            write_gol(frame)
def E(self, x):  # pylint: disable=invalid-name
    """ Electric field vector. """
    x = np.array(x)
    x1, x2, lam = self.x1, self.x2, self.lam

    # Get lengths and angles for the different triangles
    theta1, theta2 = angle(x, x1, x2), np.pi - angle(x, x2, x1)
    a = point_line_distance(x, x1, x2)
    # axis=-1 so that x may be a single point or an array of points
    r1, r2 = np.linalg.norm(x - x1, axis=-1), np.linalg.norm(x - x2, axis=-1)

    # Calculate the parallel and perpendicular components
    sign = np.where(is_left(x, x1, x2), 1, -1)  # pylint: disable=invalid-name
    Epara = lam * (1 / r2 - 1 / r1)
    Eperp = -sign * lam * (np.cos(theta2) - np.cos(theta1)) / np.where(a == 0, np.inf, a)

    # Transform into the coordinate space and return
    dx = x2 - x1
    if len(x.shape) == 2:
        Epara = Epara[::, np.newaxis]
        Eperp = Eperp[::, np.newaxis]
    return (Eperp * (np.array([-dx[1], dx[0]]) / np.linalg.norm(dx)) +
            Epara * (dx / np.linalg.norm(dx)))
def rotation_matrix(vector: np.float64, pos_parts: np.float64, axis: str = "z"):
    normed_vector = vector.copy()
    normed_vector /= np.linalg.norm(vector)

    # Directional vector describing the axis we wish to look 'down'
    original_direction = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    switch = {"x": 0, "y": 1, "z": 2}
    try:
        original_direction[switch[axis]] = 1.0
    except KeyError:
        raise ValueError(
            f"Parameter axis must be one of x, y, or z. You supplied {axis}.")

    dot_product = np.dot(original_direction, normed_vector)
    cross_product = np.cross(original_direction, normed_vector)
    mod_cross_product = np.linalg.norm(cross_product)
    cross_product /= mod_cross_product

    theta = np.arccos(dot_product)

    q0 = np.cos(theta / 2)
    q1 = np.sin(theta / 2) * cross_product[0]
    q2 = np.sin(theta / 2) * cross_product[1]
    q3 = np.sin(theta / 2) * cross_product[2]

    # Rotation matrix built from the quaternion (q0, q1, q2, q3)
    Q = np.array([
        [
            q0**2 + q1**2 - q2**2 - q3**2,
            2 * (q1 * q2 - q0 * q3),
            2 * (q1 * q3 + q0 * q2),
        ],
        [
            2 * (q2 * q1 + q0 * q3),
            q0**2 - q1**2 + q2**2 - q3**2,
            2 * (q3 * q2 - q0 * q1),
        ],
        [
            2 * (q1 * q3 - q0 * q2),
            2 * (q3 * q2 + q0 * q1),
            q0**2 - q1**2 - q2**2 + q3**2,
        ],
    ])

    i = np.array([1, 0, 0])
    j = np.array([0, 1, 0])
    k = np.array([0, 0, 1])

    u = np.matmul(Q, i)
    v = np.matmul(Q, j)
    w = np.matmul(Q, k)

    pos_face_on = pos_parts.copy()
    pos_face_on[:, 0] = np.dot(pos_parts, u)
    pos_face_on[:, 1] = np.dot(pos_parts, v)
    pos_face_on[:, 2] = np.dot(pos_parts, w)

    return pos_face_on
def _pior(self, A, B):
    r"""Prior distribution of Reduced Rank Regression

    \varphi \varpropto \exp\left( -2.0 \cdot 10^{-5} ( \| A \|^2 + \| B \|^2 ) \right)
    """
    return np.exp(-2.0 * 10**(-5) *
                  (np.power(np.linalg.norm(A), 2) + np.power(np.linalg.norm(B), 2)))
def step(self, action):
    pre_eng_diff = self._get_energy_diff()
    # integer position in the sequence; the remainder selects the new letter
    self.sequence[int(action // self.ALPHABET_SIZE)] = action % self.ALPHABET_SIZE
    post_eng_diff = self._get_energy_diff()
    reward = np.linalg.norm(pre_eng_diff) - np.linalg.norm(post_eng_diff)
    next_state = self.get_state()
    is_finished = np.linalg.norm(post_eng_diff) < self.DONE_THRESHOLD
    return next_state, reward, is_finished
def instalar_requísitos():
    # Install any required Python packages that are missing.
    print('Instalando paquetes requísitos...')
    lista_paquetes = []

    if np is None:
        lista_paquetes.append('numpy')
    if matplotlib is None:
        lista_paquetes.append('matplotlib')
    if estad is None:
        lista_paquetes.append('scipy')
    if pymc is None:
        lista_paquetes.append('pymc')

    if len(lista_paquetes):
        if not os.path.exists(directorio_móds):
            os.makedirs(directorio_móds)
            dir_creado = True
        else:
            dir_creado = False

        # Update pip
        _actualizar_pip()

        # Install each required package
        for paq in lista_paquetes:
            _instalar_whl(paq)

        if dir_creado:
            shutil.rmtree('Módulos')

    # Check that everything is in order:
    try:
        import numpy as _
        import scipy as _
        import pymc as _
        import matplotlib as _
    except ImportError:
        _ = None
        pass

    try:
        import scipy.stats as _
        _.norm()
        print('¡Todo bien! Los paquetes Python necesarios han sido instalados.')
    except ImportError:
        _ = None
        avisar(
            '¡Error! Por experiencia personal, probablemente es porque no instalaste la versión del '
            '"Microsoft C++ 2015 redistributable" {}.\n'
            'Lo puedes conseguir de "https://www.microsoft.com/es-ES/download/details.aspx?id=48145".'
            .format('x86' if bits == '32' else 'x64'))
def Baim_func(b):
    [r, n] = b.shape
    ones = np.ones((n, 1))
    IR = np.identity(r)
    # graph Laplacian; ravel() so np.diag builds a diagonal matrix from the degree vector
    L = np.diag(global_A.dot(ones).ravel()) - global_A
    G = global_Y1 + global_Y2 - global_rho1 * global_Z1 - global_rho2 * global_Z2
    aim = (np.trace(b.dot(L.dot(b.T)))
           + global_rho3 * 0.25 * np.linalg.norm(b.dot(b.T) - IR)
           + global_rho4 * 0.5 * np.linalg.norm(b.dot(ones))
           + (global_rho1 + global_rho2) * 0.5 * np.linalg.norm(b)
           + np.trace(b.dot(G.T)))
    return aim
def z(x, y):
    # correlation-style statistic between the centred vectors x and y
    x = x[:] - np.mean(x[:])
    y = y[:] - np.mean(y[:])
    if np.linalg.norm(x) == 0 or np.linalg.norm(y) == 0:
        z = np.nan
    else:
        z = np.dot(x, y) / np.linalg.norm(x) / np.linalg.norm(y)
    return z
def distance(self, v1, v2, normalised_vectors=True):
    """
    Returns the cosine distance between two vectors.
    If the vectors are normalised, there is no need for the denominator, which is always one.
    """
    if normalised_vectors:
        return 1 - np.dot(v1, v2)
    else:
        return 1 - np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
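# A brief usage sketch (illustrative): `model` stands for any instance of the
# class defining distance() above; with unit vectors both branches agree.
import numpy as np

v1_demo = np.array([1.0, 0.0])
v2_demo = np.array([np.sqrt(0.5), np.sqrt(0.5)])
# model.distance(v1_demo, v2_demo)                           -> 1 - cos(45 deg) ~ 0.293
# model.distance(v1_demo, v2_demo, normalised_vectors=False) -> same value via the explicit norms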
def getAngleBetweenVectors(a, b, degrees=True):
    norm = np.linalg.norm
    cost = np.dot(a, b)
    cost /= norm(a) * norm(b)
    angle = np.arccos(cost)  # in radians
    if degrees:
        angle *= 180 / np.pi
    return angle
def EigenAlign_helper(A, B, s1, s2, s3, iters):
    # error checks
    gam1 = s1 + s2 - 2 * s3
    gam2 = s3 - s2
    gam3 = s2

    nA = A.shape[0]
    nB = B.shape[0]

    AkronB = np.kron(A, B)
    AkronE = np.kron(A, np.ones((nB, nB)))
    EkronB = np.kron(np.ones((nA, nA)), B)
    EkronE = np.kron(np.ones((nA, nA)), np.ones((nB, nB)))
    M = gam1 * AkronB + gam2 * AkronE + gam2 * EkronB + gam3 * EkronE

    # Power iteration
    X = np.ones((nA * nB, 1)) / (nA * nB)
    X = X / np.linalg.norm(X, 1)
    x = np.copy(X)
    for i in range(iters):
        y = M @ x
        x = y / np.linalg.norm(y)
        lam = (x.conj().T @ y).item()
        print(lam)

    Xmat = x.reshape(nB, nA)

    # for i = 1:iters
    #     X = M*X
    #     X = X./norm(X,2)
    # end
    # X
    # Xmat = reshape(X,nB,nA)
    # Run Hungarian method
    # Xmat = Xmat'
    # ej = munkres(-Xmat)
    # ei = 1:length(ej)
    # ids = find(ej)
    # ej = ej[ids]
    # ei = ei[ids]
    # or bipartite matching
    # try using intmatch

    Xmat = Xmat.conj()
    ei, ej = file1.edge_list(
        file1.bipartite_matching(scipy.sparse.csc_matrix(Xmat)))
    MATCHING = scipy.sparse.csc_matrix(
        (np.ones(len(ei)), (ei, ej)), shape=(nA, nB))
    # mirrors the MATLAB-style X' * MATCHING(:)
    weight = (X.conj().T @ MATCHING.toarray().ravel()[:, None]).item()
    Ai = A[np.ix_(ei, ei)]  # submatrices (MATLAB-style indexing)
    Bi = B[np.ix_(ej, ej)]
    conserved_edges = np.count_nonzero(Ai * Bi) / 2
    return ei, ej, x, lam  # ,Xmat,weight,conserved_edges
def is_close(self, x):
    """ Return True if x is close to the charge. """
    theta1 = angle(x, self.x1, self.x2)
    theta2 = angle(x, self.x2, self.x1)
    if theta1 < np.radians(90) and theta2 < np.radians(90):
        return point_line_distance(x, self.x1, self.x2) < self.R
    else:
        # axis=-1 so that x may be a single point or an array of points
        return np.min([np.linalg.norm(self.x1 - x, axis=-1),
                       np.linalg.norm(self.x2 - x, axis=-1)], axis=0) < self.R
def compute_transform(vec_1, vec_2):
    """
    Given two vectors in R^2, compute the euclidean transformation taking
    vec_2 to vec_1.
    """
    return {
        "shift": vec_1 - vec_2,
        "rotation": np.arccos(
            np.dot(vec_1 / np.linalg.norm(vec_1), vec_2 / np.linalg.norm(vec_2)))
    }
def cosinesimilarity1(a, traindata):
    a = a.clip(0)
    norm_a = np.linalg.norm(a)
    for key, b in traindata.items():
        b = b.clip(0)
        dot_product = np.dot(a, b)
        norm_b = np.linalg.norm(b)
        result1 = dot_product / (norm_a * norm_b)
    # NOTE: as written, only the similarity for the last item in traindata is returned
    return result1
def ccd(self):
    # distance from the end effector to the target (tw, tz)
    distance = np.sqrt((self.tw - self.x[2]) ** 2 + (self.tz - self.z[2]) ** 2)
    # iterate until the end effector is less than 1 unit away
    while distance > 1:
        pt = np.array([self.tw, self.tz])
        e = np.array([self.x[2], self.z[2]])
        j = np.array([self.x[1], self.z[1]])
        temp1 = (e - j) / np.linalg.norm(e - j)
        temp2 = (pt - j) / np.linalg.norm(pt - j)
        angle = np.arccos(np.dot(temp1, temp2))
        # NOTE: the rest of the CCD update (rotating the joint by `angle` and
        # recomputing `distance`) was missing from the original snippet.
def richardson(A, P, b, x0, tol=1e-6, maxit=100):
    x = 1 * x0
    # preconditioned residual norm ||P^-1 (A x - b)||
    r = np.linalg.norm(np.linalg.inv(P).dot(A.dot(x) - b))
    it = 0
    I = np.eye(A.shape[0])
    # iterate while the residual is still above the tolerance
    while r > tol and it < maxit:
        x = (I - np.linalg.inv(P).dot(A)).dot(x) + np.linalg.inv(P).dot(b)
        r = np.linalg.norm(np.linalg.inv(P).dot(A.dot(x) - b))
        it += 1
        print("it = ", it, "r = ", r)
    return x
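# A short usage sketch (illustrative assumptions): preconditioned Richardson
# iteration with a Jacobi preconditioner P = diag(A); it prints the residual
# at each step.
import numpy as np

A_rich = np.array([[3.0, 1.0], [1.0, 2.0]])
P_rich = np.diag(np.diag(A_rich))
b_rich = np.array([5.0, 5.0])
x_rich = richardson(A_rich, P_rich, b_rich, np.zeros(2), tol=1e-10, maxit=200)
# x_rich approaches np.linalg.solve(A_rich, b_rich) == [1., 2.]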
def limits(predicted, target):
    sym_losses = []
    rom_losses = []
    # print("POS p", predicted.shape)
    for frame in predicted[:, 0, :, :]:
        print("FRAME", frame)
        sym_loss, thetas, rom_loss = m.in_frame(frame)
        sym_losses.append(sym_loss)
        rom_losses.append(rom_loss)
    sym_loss = torch.mean(torch.tensor(np.linalg.norm(sym_losses)))
    rom_loss = torch.mean(torch.tensor(np.linalg.norm(rom_losses)))
    return sym_loss + rom_loss
def vector_partial_gradient(self, u, v, normalised_vectors=True):
    r"""
    This function returns the gradient of the cosine distance: \frac{\partial dist(u,v)}{\partial u}
    If both vectors have norm 1 (we do full batch and renormalise at every step), we can save some time.
    """
    if normalised_vectors:
        gradient = u * np.dot(u, v) - v
    else:
        norm_u = np.linalg.norm(u)
        norm_v = np.linalg.norm(v)
        nominator = u * np.dot(u, v) - v * np.power(norm_u, 2)
        denominator = norm_v * np.power(norm_u, 3)
        gradient = nominator / denominator
    return gradient
def sim_cosine(vector1, vector2, **args):
    '''
    An implementation of the cosine similarity. The result is the cosine of the
    angle formed between the two preference vectors. Note that this similarity
    does not "center" its data (i.e. it does not shift the user's preference
    values so that each of their means is 0). For that behavior, use the Pearson
    Coefficient, which is mathematically equivalent for centered data.

    Parameters:
        vector1: The vector you want to compare
        vector2: The second vector you want to compare
        args: optional arguments

    The value returned is in [0,1].
    '''
    if len(vector1) == 0 or len(vector2) == 0:
        return 0.0

    # Using Content Mode.
    if type(vector1) == type({}):
        try:
            from numpy import dot
            from numpy.linalg import norm
            v = [(vector1[item], vector2[item]) for item in vector1
                 if item in vector2]
            vector1 = [vec[0] for vec in v]
            vector2 = [vec[1] for vec in v]
        except ImportError:
            from math import sqrt

            def dot(p1, p2):
                return sum([p1.get(item, 0) * p2.get(item, 0) for item in p2])

            def norm(p):
                return sqrt(sum([p.get(item, 0) * p.get(item, 0) for item in p]))
    else:
        try:
            from numpy import dot
            from numpy.linalg import norm
        except ImportError:
            from math import sqrt

            def dot(p1, p2):
                return sum([p1[i] * p2[i] for i in range(len(p1))])

            def norm(p):
                return sqrt(sum([p[i] * p[i] for i in range(len(p))]))

    return dot(vector1, vector2) / (norm(vector1) * norm(vector2))
def compute_normalized_distances_raw_embed(name, min_val, max_val, othername,
                                           min_valother, max_valother,
                                           transform, number, transformother,
                                           ending):
    total_dif = 0
    for img in os.listdir(name):
        if img.find(ending) != -1:
            im = transform(name + img)
            cmp_im = transformother(img, othername)
            for oimg in os.listdir(name):
                if oimg.find(ending) != -1 and oimg != img:
                    oim = transform(name + oimg)
                    ocmp_im = transformother(oimg, othername)
                    dif = normalize(np.linalg.norm(im - oim), min_val, max_val)
                    cmp_dif = normalize(np.linalg.norm(cmp_im - ocmp_im),
                                        min_valother, max_valother)
                    total_dif += abs(dif - cmp_dif)
    return total_dif
def compute_vote(lbda, mu, omega, dx, dy, kappa):
    """
    COMPUTEVOTE() computes the following:
    (lambda, mu, omega, dx, dy) |-> vote for the point at distance exp(kappa * omega)
    away from (lambda, mu) in the direction normal to (dx, dy).
    """
    # 1) compute the unit normal to (dx, dy):
    grad_norm = np.linalg.norm((dx, dy))
    normal_vec = (-dy / grad_norm, dx / grad_norm)
    # 2) compute the appropriate distance away from (lambda, mu):
    dist = np.exp(kappa * omega)
    # 3) compute the voted point:
    return (lbda + normal_vec[0] * dist, mu + normal_vec[1] * dist)
def get_focal_vector(img, kappa, window=3, lbound=0.3):
    """
    get_focal_vector takes an image and extracts the focal point from a fingerprint.
    Performs the following:
        1. Finds all the points of high curvature, P = p_1, ..., p_N
        2. For each point, compute its voted focal point VOTE(p_i)
        3. For each point, compute the normal vector to the flow tangent u_i
        4. Compute the focal point as the centroid of voted focals
        5. Compute the mean curvature (`theta`) as the mean of the normals
        6. Return focal point(s).
    """
    # 1) find all clusters of high curvature:
    # run function to extract array of high curvature points:
    high_curv_pts = get_high_curv_pts(img, window, lbound)
    # delete any row with all zeros:
    delete_zero_rows(high_curv_pts)  # FIX THIS!!!
    # get number of points:
    num_points = np.shape(high_curv_pts)[0]

    # 2) for each point, compute voted focal point:
    focal_candidates = np.zeros((num_points, 2))
    for p in range(0, num_points):
        pt = high_curv_pts[p, :]
        vote = compute_vote(pt[0], pt[1], pt[2], pt[3], pt[4], kappa)
        focal_candidates[p, 0] = vote[0]
        focal_candidates[p, 1] = vote[1]

    # 3) for each point, compute normal vector to flow tangent
    normals = np.zeros((num_points, 2))
    for q in range(0, num_points):
        pt = high_curv_pts[q, :]
        tangent_flow = pt[3:5]  # (dx, dy) components of the flow tangent
        norm_vec = np.array([-tangent_flow[1], tangent_flow[0]])
        magnitude = np.linalg.norm(norm_vec)
        normals[q, :] = norm_vec / magnitude

    # 4) compute focal point as centroid of voted focals
    focal = np.sum(focal_candidates, 0) / num_points

    # 5) compute mean curvature as mean of normals
    theta_sum = np.sum(normals, 0)
    theta = theta_sum / np.linalg.norm(theta_sum)

    # 6) return focal points:
    return [focal, theta]
def point_line_distance(x0, x1, x2):
    """
    Find the shortest distance between the point x0 and the line through x1 and x2.
    """
    assert x1.shape == x2.shape == (2, )
    return np.fabs(np.cross(x0 - x1, x0 - x2)) / np.linalg.norm(x2 - x1)
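# A quick usage sketch (illustrative): the distance from (0, 1) to the line
# through (0, 0) and (1, 0) is 1.
import numpy as np

d_demo = point_line_distance(np.array([0.0, 1.0]),
                             np.array([0.0, 0.0]),
                             np.array([1.0, 0.0]))
# d_demo == 1.0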
def polak_ribiere(algo):
    """ Polak-Ribiere descent direction update method. """
    b = np.dot(algo.current_gradient.T,
               (algo.current_gradient - algo.last_gradient))
    # beta_PR divides by the squared norm of the previous gradient
    b /= np.linalg.norm(algo.last_gradient) ** 2
    return b
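# A hedged standalone sketch (not from the original source) of how a
# Polak-Ribiere beta is typically consumed in nonlinear conjugate gradient:
# the new search direction is d_new = -g_new + beta * d_old.
import numpy as np

def polak_ribiere_beta(g_new, g_old):
    # beta_PR = g_new . (g_new - g_old) / ||g_old||^2
    return np.dot(g_new, g_new - g_old) / np.dot(g_old, g_old)

g_old_demo = np.array([1.0, 0.0])
g_new_demo = np.array([0.2, 0.5])
beta_demo = max(0.0, polak_ribiere_beta(g_new_demo, g_old_demo))  # PR+ restart rule
d_new_demo = -g_new_demo + beta_demo * np.array([-1.0, 0.0])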
def G(s, t, r0, h0, r1, h1, a0, b0, c0, a1, b1, c1, delta):
    import numpy
    lenDelta = numpy.linalg.norm(delta)
    h0Div2 = h0 / 2.0
    h1Div2 = h1 / 2.0
    omsmt = 1 - s - t
    ssqr = s * s
    tsqr = t * t
    omsmtsqr = omsmt * omsmt
    temp = ssqr + tsqr + omsmtsqr
    L0 = a0 * s + b0 * t + c0 * omsmt
    L1 = a1 * s + b1 * t + c1 * omsmt
    Q0 = temp - L0 * L0
    Q1 = temp - L1 * L1
    return (r0 * numpy.sqrt(Q0) + r1 * numpy.sqrt(Q1)
            + h0Div2 * numpy.fabs(L0) + h1Div2 * numpy.fabs(L1)
            - omsmt * lenDelta)
def resample(self, spacing):
    """
    Returns a field without edges, but with each of our edges resampled at the
    given spacing.  Any edges that have one or both endpoints with NaN are skipped.
    """
    valid = np.isfinite(self.F)
    X = [self.X[valid]]
    F = [self.F[valid]]
    for a, b in self.edges:
        if np.isnan(self.F[a]) or np.isnan(self.F[b]):
            continue
        length = np.linalg.norm(self.X[b] - self.X[a])
        steps = int(np.ceil(length / spacing))
        alpha = np.arange(1, steps) / float(steps)
        X.append((1 - alpha[:, None]) * self.X[a] + alpha[:, None] * self.X[b])
        F.append((1 - alpha) * self.F[a] + alpha * self.F[b])
    X = np.concatenate(X)
    F = np.concatenate(F)
    return field.XYZField(X=X, F=F)
def find_v(omega, theta, trans):
    """
    Finds the linear velocity term of the twist (v, omega) given omega, theta
    and the translation.

    Args:
    omega - (3,) ndarray : the axis you want to rotate about
    theta - scalar value
    trans - (3,) ndarray or 3x1 list : the translation component of the rigid body transform

    Returns:
    v - (3,1) ndarray : the linear velocity term of the twist (v, omega)
    """
    R = kfs.rotation_3d(omega, theta)
    I = np.identity(3)
    if np.array_equal(R, I):
        # pure translation: v is the unit translation direction
        v = trans / np.linalg.norm(trans)
    else:
        omega1 = np.array([[omega[0]], [omega[1]], [omega[2]]])
        A = ((I - kfs.rotation_3d(omega, theta)).dot(kfs.skew_3d(omega))
             + omega1.dot(omega1.T) * theta)
        v = (np.linalg.inv(A)).dot(trans)
    v = np.array([[v[0]], [v[1]], [v[2]]])
    return v
def __call__(self, i, j):
    x1, x2 = i, j
    mm = x1 - self.meanXSN
    v = np.dot(self.iSw, mm)
    v = v / np.linalg.norm(v)
    v0 = np.dot(v.T, (x1 + self.meanXSN)) / 2.
    score = np.dot(v.T, x2) - v0
    return score
def solve(system, gamma=0.9, byPol=True, tol=1e-8):
    numNodes = system.network.numNodes
    numTrt = agents.Agent.numTrt(system)
    numValidTrt = agents.Agent.numValidTrt(numNodes, numTrt)

    r = np.array(PolicyIteration2.calcR(system))
    p = np.array(PolicyIteration2.calcP(system))

    pol0 = [[0] for i in range(1 << numNodes)]
    if not byPol:
        v0 = PolicyIteration2.vForPolicy(pol0, system, r, p, gamma)

    cont = True
    while cont:
        pol1 = PolicyIteration2.policyImprovement(pol0, system, r, p, gamma)
        if not byPol:
            v1 = PolicyIteration2.vForPolicy(pol1, system, r, p, gamma)

        if byPol:
            cont = pol0 != pol1
        else:
            cont = np.linalg.norm(v0 - v1) > tol

        pol0 = pol1
        if not byPol:
            v0 = v1

    return pol0
def knn(test, train_images):
    """Compare test against train_images and return the nearest neighbour."""
    test_ravel = np.ravel(test)
    train_ravel = [np.ravel(img) for img in train_images]
    min_metric = np.linalg.norm(test_ravel - train_ravel[0])
    min_idx = 0
    for i, img in enumerate(train_ravel):
        metric = np.linalg.norm(test_ravel - img)
        if metric < min_metric:
            min_metric = metric
            min_idx = i
    return min_idx, min_metric
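# A small usage sketch (illustrative data): the nearest training image to a
# slightly perturbed copy of train_demo[3] should be index 3.
import numpy as np

rng = np.random.default_rng(0)
train_demo = [rng.random((8, 8)) for _ in range(5)]
test_demo = train_demo[3] + 0.01 * rng.random((8, 8))
idx_demo, dist_demo = knn(test_demo, train_demo)
# idx_demo == 3; dist_demo is the (small) perturbation norm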
def pointToLineSegement(point, start, end):
    # NOTE: the original function was left unfinished; the completion below is a
    # best-effort guess that returns the distance from `point` to the segment.
    direction = end - start
    length = np.linalg.norm(direction)
    normalized = direction / length
    # project the point onto the segment and clamp to its extent
    t = np.clip(np.dot(point - start, normalized), 0.0, length)
    closest = start + t * normalized
    return np.linalg.norm(point - closest)
def rotation_matrix(a1, a2, b1, b2):
    """Returns a rotation matrix that rotates the vectors *a1* in the
    direction of *a2* and *b1* in the direction of *b2*.

    In the case that the angle between *a2* and *b2* is not the same as
    between *a1* and *b1*, a proper rotation matrix will anyway be
    constructed by first rotating *b2* in the *b1*, *b2* plane.
    """
    a1 = np.asarray(a1, dtype=float) / np.linalg.norm(a1)
    b1 = np.asarray(b1, dtype=float) / np.linalg.norm(b1)
    c1 = np.cross(a1, b1)
    c1 /= np.linalg.norm(c1)  # clean out rounding errors...

    a2 = np.asarray(a2, dtype=float) / np.linalg.norm(a2)
    b2 = np.asarray(b2, dtype=float) / np.linalg.norm(b2)
    c2 = np.cross(a2, b2)
    c2 /= np.linalg.norm(c2)  # clean out rounding errors...

    # Calculate rotated *b2*
    theta = np.arccos(np.dot(a2, b2)) - np.arccos(np.dot(a1, b1))
    b3 = np.sin(theta) * a2 + np.cos(theta) * b2
    b3 /= np.linalg.norm(b3)  # clean out rounding errors...

    A1 = np.array([a1, b1, c1])
    A2 = np.array([a2, b3, c2])
    R = np.linalg.solve(A1, A2).T
    return R
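# A brief usage sketch (illustrative): map the x-axis onto the y-axis and vice
# versa; applying the returned matrix to a1 reproduces a2 up to rounding.
import numpy as np

R_demo = rotation_matrix([1, 0, 0], [0, 1, 0], [0, 1, 0], [1, 0, 0])
# np.allclose(R_demo @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0]) -> True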
def EB(r, rs, v, a):
    # Lienard-Wiechert fields of a unit point charge at rs with velocity v and
    # acceleration a, evaluated at r (units with c = 1).
    gamma = 1 / np.sqrt(1 - np.linalg.norm(v) ** 2)
    R = np.linalg.norm(r - rs)
    n = (r - rs) / R
    firstterm = (n - v) / (gamma ** 2 * (1 - np.dot(n, v)) ** 3 * R ** 2)
    secondterm = np.cross(n, np.cross(n - v, a)) / (1 - np.dot(n, v)) ** 3 / R
    E = firstterm + secondterm
    B = np.cross(n, E)
    return {'E': E, 'B': B}
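# A sanity-check sketch (illustrative, assuming unit charge and c = 1): for a
# static source (v = 0, a = 0) the field reduces to the Coulomb form n / R^2
# and the magnetic field vanishes.
import numpy as np

fields_demo = EB(r=np.array([1.0, 0.0, 0.0]), rs=np.zeros(3),
                 v=np.zeros(3), a=np.zeros(3))
# fields_demo['E'] ~ [1.0, 0.0, 0.0]; fields_demo['B'] ~ [0.0, 0.0, 0.0]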
def truncate_onesite(A, direction, maxD):
    d, D1, D2 = A.shape
    if direction == 'lr':
        A = np.reshape(A, (d * D1, D2))
        B, S, U = svd2(A)
        DB = S.shape[0]
        D2 = B.shape[1]
        if DB > maxD:
            S = np.diag(S)              # turn the diagonal matrix S into a vector
            S = S[0:maxD]               # truncate the singular values
            S = S / np.linalg.norm(S)   # renormalize so that \sum_i s_i^2 = 1
            S = np.diag(S)              # turn the vector S back into a diagonal matrix
            B = B[:, 0:maxD]            # truncate B
            U = U[0:maxD, :]            # truncate U
            B = np.reshape(B, (d, D1, maxD))
        else:
            B = np.reshape(B, (d, D1, D2))
        U = np.dot(S, U)
    elif direction == 'rl':
        A = np.transpose(A, (1, 0, 2))
        A = np.reshape(A, (D1, d * D2))
        U, S, B = svd2(A)
        DB = S.shape[0]
        D1 = B.shape[0]
        if DB > maxD:
            S = np.diag(S)
            S = S[0:maxD]
            S = S / np.linalg.norm(S)
            S = np.diag(S)
            B = B[0:maxD, :]
            U = U[:, 0:maxD]
            B = np.reshape(B, (maxD, d, D2))
        else:
            B = np.reshape(B, (D1, d, D2))
        B = np.transpose(B, (1, 0, 2))
        U = np.dot(U, S)
    return B, U, DB
def as_rotation_matrix(q):
    """Convert input quaternion to 3x3 rotation matrix

    Parameters
    ----------
    q: quaternion or array of quaternions
        The quaternion(s) need not be normalized, but must all be nonzero

    Returns
    -------
    rot: float array
        Output shape is q.shape+(3,3).  This matrix should multiply (from the
        left) a column vector to produce the rotated column vector.

    Raises
    ------
    ZeroDivisionError
        If any of the input quaternions have norm 0.0.

    """
    if q.shape == ():  # This is just a single quaternion
        n = q.norm()
        if n == 0.0:
            raise ZeroDivisionError("Input to `as_rotation_matrix({0})` has zero norm".format(q))
        elif abs(n - 1.0) < _eps:  # Input q is basically normalized
            return np.array([
                [1 - 2*(q.y**2 + q.z**2), 2*(q.x*q.y - q.z*q.w), 2*(q.x*q.z + q.y*q.w)],
                [2*(q.x*q.y + q.z*q.w), 1 - 2*(q.x**2 + q.z**2), 2*(q.y*q.z - q.x*q.w)],
                [2*(q.x*q.z - q.y*q.w), 2*(q.y*q.z + q.x*q.w), 1 - 2*(q.x**2 + q.y**2)]
            ])
        else:  # Input q is not normalized
            return np.array([
                [1 - 2*(q.y**2 + q.z**2)/n, 2*(q.x*q.y - q.z*q.w)/n, 2*(q.x*q.z + q.y*q.w)/n],
                [2*(q.x*q.y + q.z*q.w)/n, 1 - 2*(q.x**2 + q.z**2)/n, 2*(q.y*q.z - q.x*q.w)/n],
                [2*(q.x*q.z - q.y*q.w)/n, 2*(q.y*q.z + q.x*q.w)/n, 1 - 2*(q.x**2 + q.y**2)/n]
            ])
    else:  # This is an array of quaternions
        q = as_float_array(q)
        n = np.sum(q**2, axis=-1)  # squared norm of each quaternion (the terms below are quadratic)
        if np.any(n == 0.0):
            raise ZeroDivisionError("Array input to `as_rotation_matrix` has at least one element with zero norm")
        else:  # Assume input q is not normalized
            m = np.empty(q.shape[:-1] + (3, 3))
            m[..., 0, 0] = 1.0 - 2*(q[..., 2]**2 + q[..., 3]**2)/n
            m[..., 0, 1] = 2*(q[..., 1]*q[..., 2] - q[..., 3]*q[..., 0])/n
            m[..., 0, 2] = 2*(q[..., 1]*q[..., 3] + q[..., 2]*q[..., 0])/n
            m[..., 1, 0] = 2*(q[..., 1]*q[..., 2] + q[..., 3]*q[..., 0])/n
            m[..., 1, 1] = 1.0 - 2*(q[..., 1]**2 + q[..., 3]**2)/n
            m[..., 1, 2] = 2*(q[..., 2]*q[..., 3] - q[..., 1]*q[..., 0])/n
            m[..., 2, 0] = 2*(q[..., 1]*q[..., 3] - q[..., 2]*q[..., 0])/n
            m[..., 2, 1] = 2*(q[..., 2]*q[..., 3] + q[..., 1]*q[..., 0])/n
            m[..., 2, 2] = 1.0 - 2*(q[..., 1]**2 + q[..., 2]**2)/n
            return m
def expmScipy(A, q=7):
    """Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : array, shape(M,M)
        Matrix to be exponentiated
    q : integer
        Order of the Pade approximation

    Returns
    -------
    expA : array, shape(M,M)
        Matrix exponential of A
    """
    A = np.asarray(A)
    ss = True
    if A.dtype.char in ['f', 'F']:
        pass  # A.savespace(1)
    else:
        pass  # A.savespace(0)

    # Scale A so that norm is < 1/2
    nA = np.linalg.norm(A, np.inf)
    if nA == 0:
        return np.identity(len(A), A.dtype.char)
    val = np.log2(nA)
    e = int(np.floor(val))
    j = max(0, e + 1)
    A = A / 2.0**j

    # Pade Approximation for exp(A)
    X = A
    c = 1.0 / 2
    N = np.eye(*A.shape) + c * A
    D = np.eye(*A.shape) - c * A
    for k in range(2, q + 1):
        c = c * (q - k + 1) / (k * (2 * q - k + 1))
        X = np.dot(A, X)
        cX = c * X
        N = N + cX
        if not k % 2:
            D = D + cX
        else:
            D = D - cX
    F = np.linalg.solve(D, N)
    for k in range(1, j + 1):
        F = np.dot(F, F)
    pass  # A.savespace(ss)
    return F
def BendByWindow(self, point, ri):
    # lastSurface = Surface.objects.get(surfaceIndex=self.surfaceIndex - 1)
    # ri
    nvec = self.ZNormFunc(point.x, point.y)
    # incident unit direction (the y component uses sin(phi))
    ivec = np.array([math.sin(point.theta) * math.cos(point.phi),
                     math.sin(point.theta) * math.sin(point.phi),
                     math.cos(point.theta)])
    avec = ivec - np.dot(ivec, nvec) * nvec
    ovec = (np.sign(np.dot(ivec, nvec)) * nvec
            + avec / math.sqrt(math.pow(ri, 2) - np.dot(avec, avec)))
    ovec /= np.linalg.norm(ovec)
    theta = math.acos(ovec[2])
    phi = math.atan2(ovec[1], ovec[0])
    return [point.x, point.y, point.z, theta, phi]
def direct(sun_pos, grid):
    """
    Fire collimated rays from the sun location into the scene.
    Sample across the whole grid.
    """
    # for each pixel at the top of the grid, pass a sun ray in
    for i in range(grid.gr.shape[0]):
        # make a ray starting at this location
        xpos = i * grid.xres
        ypos = grid.zres * grid.zsize
        pos = np.array([xpos, ypos])
        direction = (pos - sun_pos) / np.linalg.norm(pos - sun_pos)
        r = ray(pos, direction)
def gradient_fall(A0, b0):
    # conjugate-gradient style descent on the normal equations A0^T A0 x = A0^T b0
    A = np.dot(np.transpose(A0), A0)
    b = np.dot(np.transpose(A0), b0)
    m = len(A)
    eps = np.inf
    x = np.zeros((m, 1))
    d = np.zeros((m, 1))
    g = -b
    # iterate until the residual norm drops below the (coarse) threshold of 1
    while eps > 1:
        g_prev = g
        g = np.dot(A, x) - b
        d = -g + (np.dot(np.transpose(g), g) / np.dot(np.transpose(g_prev), g_prev)) * d
        s = -np.dot(np.transpose(d), g) / np.dot(np.transpose(d), np.dot(A, d))
        x = x + s * d
        eps = np.linalg.norm(np.dot(A, x) - b)
    return x
def GDer(s, t, r0, h0, r1, h1, a0, b0, c0, a1, b1, c1, delta):
    import numpy
    lenDelta = numpy.linalg.norm(delta)
    h0Div2 = h0 / 2.0
    h1Div2 = h1 / 2.0
    omsmt = 1 - s - t
    ssqr = s * s
    tsqr = t * t
    omsmtsqr = omsmt * omsmt
    temp = ssqr + tsqr + omsmtsqr
    L0 = a0 * s + b0 * t + c0 * omsmt
    L1 = a1 * s + b1 * t + c1 * omsmt
    Q0 = temp - L0 * L0
    Q1 = temp - L1 * L1
    diffS = s - omsmt
    diffT = t - omsmt
    diffa0c0 = a0 - c0
    diffa1c1 = a1 - c1
    diffb0c0 = b0 - c0
    diffb1c1 = b1 - c1
    halfQ0s = diffS - diffa0c0 * L0
    halfQ1s = diffS - diffa1c1 * L1
    halfQ0t = diffT - diffb0c0 * L0
    halfQ1t = diffT - diffb1c1 * L1
    factor0 = r0 / numpy.sqrt(Q0)
    factor1 = r1 / numpy.sqrt(Q1)
    signL0 = numpy.sign(L0)
    signL1 = numpy.sign(L1)
    gradient = numpy.zeros(2)  # float array so the increments below are not truncated
    gradient[0] += halfQ0s * factor0
    gradient[0] += halfQ1s * factor1
    gradient[0] += h0Div2 * diffa0c0 * signL0
    gradient[0] += h1Div2 * diffa1c1 * signL1
    gradient[0] += lenDelta
    gradient[1] += halfQ0t * factor0
    gradient[1] += halfQ1t * factor1
    gradient[1] += h0Div2 * diffb0c0 * signL0
    gradient[1] += h1Div2 * diffb1c1 * signL1
    gradient[1] += lenDelta
    return gradient
def checkTreeCoeffs():
    """Check whether tree coefficients are properly generated or not.
    Generates random fieldGens, finds velocities using FMM and the regular
    method, and compares accuracies at different positions."""
    # Generate random fieldGens and put them in random lists
    fieldGens = randomFieldGens()

    # Create 20 random positions where the velocity field will be evaluated
    pos = numpy.zeros([20, 2])
    for i in range(20):
        pos[i][0] = random.random()
        pos[i][1] = random.random()

    # Find velocity with the regular method
    fieldReg = dfn.velField(pos, fieldGens, vinf=0.0)

    # Find velocity with FMM
    fieldFMM = velFieldFMM(pos, fieldGens, vinf=0.0)

    # Find and print the error between the two at each position
    for i in range(20):
        err = numpy.linalg.norm(fieldReg[i] - fieldFMM[i])
        print(err)
        if err > 1e-06:
            print("Error is greater than 1e-06")
def as_euler_angles(q):
    """Open Pandora's Box

    If somebody is trying to make you use Euler angles, tell them no, and
    walk away, and go and tell your mum.

    You don't want to use Euler angles.  They are awful.  Stay away.  It's one
    thing to convert from Euler angles to quaternions; at least you're moving
    in the right direction.  But to go the other way?!  It's just not right.

    Parameters
    ----------
    q: quaternion or array of quaternions
        The quaternion(s) need not be normalized, but must all be nonzero

    Returns
    -------
    alpha_beta_gamma: float array
        Output shape is q.shape+(3,).  These represent the angles (alpha,
        beta, gamma), where the normalized input quaternion represents
        `exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)`.

    Raises
    ------
    AllHell
        If you try to actually use Euler angles, when you could have been
        using quaternions like a sensible person.

    """
    alpha_beta_gamma = np.empty(q.shape + (3,), dtype=float)
    q = as_float_array(q)
    n = np.sum(q**2, axis=-1)  # squared norm of each quaternion
    alpha_beta_gamma[..., 0] = np.arctan2(q[..., 3], q[..., 0]) + np.arctan2(-q[..., 1], q[..., 2])
    alpha_beta_gamma[..., 1] = 2 * np.arccos(np.sqrt((q[..., 0]**2 + q[..., 3]**2) / n))
    alpha_beta_gamma[..., 2] = np.arctan2(q[..., 3], q[..., 0]) - np.arctan2(-q[..., 1], q[..., 2])
    return alpha_beta_gamma
def log(cls, R):
    R = cls.unpack(R)
    # http://math.stackexchange.com/questions/83874/
    t = R.trace()
    r = np.array((R[2, 1] - R[1, 2],
                  R[0, 2] - R[2, 0],
                  R[1, 0] - R[0, 1]))
    if t >= 3. - 1e-8:
        return (.5 - (t - 3.) / 12.) * r
    elif t > -1. + 1e-8:
        th = np.arccos(t / 2. - .5)
        return th / (2. * np.sin(th)) * r
    else:
        assert t <= -1. + 1e-8
        a = np.argmax(R[np.diag_indices_from(R)])
        b = (a + 1) % 3
        c = (a + 2) % 3
        s = np.sqrt(R[a, a] - R[b, b] - R[c, c] + 1.)
        v = np.empty(3)
        v[a] = s / 2.
        v[b] = (R[b, a] + R[a, b]) / (2. * s)
        v[c] = (R[c, a] + R[a, c]) / (2. * s)
        return v / np.linalg.norm(v)
def line_search(x_old, f_old, g, p, function, data, max_step, tol_x=1e-8, alpha=1e-4):
    check = False
    # Scale if attempted step is too big
    if np.linalg.norm(p) > max_step:
        p *= max_step / np.linalg.norm(p)
    slope = np.dot(g, p)
    if slope >= 0.0:
        print("Newton solver: roundoff problem in line search, exiting...")
        return x_old, None, f_old, check
    x_scale = np.max(np.append(np.abs(x_old), 1.0))
    lamda_norm = np.max(np.abs(p) / x_scale)
    alamin = tol_x / lamda_norm
    alam = 1.0
    while True:
        # take step
        x = x_old + alam * p
        # evaluate function
        fv = function(x, data)
        f = 0.5 * np.dot(fv, fv)
        if alam < alamin:
            # convergence on dx
            x = x_old
            check = True
            return x, fv, f, check
        elif f <= f_old + alpha * alam * slope:
            # sufficient function decrease
            return x, fv, f, check
        else:
            if alam == 1.0:
                tmplam = -slope / (2.0 * (f - f_old - slope))  # first attempt
            else:
                # subsequent backtracks
                rhs1 = f - f_old - alam * slope
                rhs2 = f2 - f_old - alam2 * slope
                a = (rhs1 / (alam * alam) - rhs2 / (alam2 * alam2)) / (alam - alam2)
                b = (-alam2 * rhs1 / (alam * alam) + alam * rhs2 / (alam2 * alam2)) / (alam - alam2)
                if a == 0.0:
                    tmplam = -slope / (2.0 * b)
                else:
                    disc = b * b - 3.0 * a * slope
                    if disc < 0.0:
                        tmplam = 0.5 * alam
                    elif b <= 0.0:
                        tmplam = (-b + np.sqrt(disc)) / (3.0 * a)
                    else:
                        tmplam = -slope / (b + np.sqrt(disc))
                if tmplam > 0.5 * alam:
                    tmplam = 0.5 * alam
        alam2 = alam
        f2 = f
        alam = np.max([tmplam, 0.1 * alam])  # try again
def _cosine(self, vector1, vector2):
    """
    Compare related documents j and q in the concept space:
        cosine = ( V1 * V2 ) / ||V1|| x ||V2||
    """
    return float(numpy.dot(vector1, vector2) /
                 (numpy.linalg.norm(vector1) * numpy.linalg.norm(vector2)))
def ZNormFunc(self, x, y):
    tmp = self.DzdaFunc(x, y) * 2
    n = np.array([tmp * (x - self.surfaceDecenterX),
                  tmp * (y - self.surfaceDecenterY),
                  -1])
    return n / np.linalg.norm(n)
def norm(self, points):
    return numpy.linalg.norm(self.parameter)
def condition(a):
    """Calculate the condition number of the matrix.

    :param a: a numpy matrix.
    """
    return np.linalg.norm(a) * np.linalg.norm(np.linalg.inv(a))
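# A quick comparison sketch (illustrative): condition() above uses the
# Frobenius norm, so it upper-bounds the usual 2-norm condition number
# returned by np.linalg.cond.
import numpy as np

a_demo = np.array([[1.0, 2.0], [3.0, 4.0]])
kappa_frob = condition(a_demo)
kappa_2 = np.linalg.cond(a_demo)
# kappa_frob >= kappa_2; both are roughly 15 for this matrix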
# OBS: each row should add up to one.
print(A)
print(np.dot(A, A))  # Obs: different notation if np.matrix!

# Question 2
#
# What are the transition probabilities after 2 transitions? After 5? After 10?
# What are the steady state probabilities?
print(np.linalg.matrix_power(A, 2 + 1))
print(np.linalg.matrix_power(A, 5 + 1))
print(np.linalg.matrix_power(A, 10 + 1))
print(np.linalg.matrix_power(A, 30 + 1))

# Question 2b
#
# What could you do with that information?
# Would you be optimistic, neutral or pessimistic about the market?

# Question 3
#
# Can you name some real-life examples that could be modeled by Markov chains?
# Can you name examples that cannot be treated as Markov chains?
# Can you name an example of finite probabilistic states that cannot be modeled as Markov chains?

# Extra Question: How can you know for sure when it converges?
# IE, a more scientific method? Check that successive powers stop changing:
# np.linalg.norm(np.linalg.matrix_power(A, n + 1) - np.linalg.matrix_power(A, n + 2)) < tol