def getE(g, k):
    # Small ridge term to keep the inversions numerically stable
    # (the original "10 ^ -6" is integer XOR in Python, not 1e-6)
    m = 1e-6
    KInv = linalg.inv(k + numpy.eye(k.shape[1]) * m)
    Ktrans = numpy.transpose(k)
    KtransInv = linalg.inv(Ktrans + numpy.eye(Ktrans.shape[1]) * m)
    e = KtransInv.dot(g).dot(KInv)
    return e
def log_p_ml(dim, K, ln_lambda, beta0, betak, v0, vk, m0, mk, w0, wk):
    """
    :param dim: dimensionality of the data
    :param K: number of mixture components
    :param ln_lambda: expected log-determinant of the precisions, [K,]
    :param beta0: prior scale on the mean precision (scalar)
    :param betak: posterior scales, [K,]
    :param v0: prior degrees of freedom (scalar)
    :param vk: posterior degrees of freedom, [K,]
    :param m0: prior mean, [dim,]
    :param mk: posterior means, [K, dim]
    :param w0: prior Wishart scale matrix, [dim, dim]
    :param wk: posterior Wishart scale matrices, [K, dim, dim]
    :return: scalar value of the expected log prior term
    """
    diff = mk - m0  # [K, dim]
    s1 = np.einsum('ki, kij, kj->k', diff, wk, diff)
    s1 = beta0 * np.multiply(vk, s1)  # [K,]
    s2 = np.einsum('ij, kji->k', inv(w0), wk)  # [K,]
    s2 = np.multiply(vk, s2)  # [K,]
    sum = (v0 - dim) * ln_lambda - dim * beta0 * np.reciprocal(betak) + \
        dim * np.log(0.5 * beta0 / np.pi) - s1 - s2  # [K,]
    sum = 0.5 * sum.sum()  # float
    (_, minus_log_b) = slogdet(w0)
    minus_log_b = 0.5 * v0 * (minus_log_b + dim * np.log(2)) + 0.25 * dim * (dim - 1.0) * np.log(np.pi)
    minus_log_b += np.sum([gammaln((v0 + 1 - i) / 2.0) for i in range(dim)])
    sum -= K * minus_log_b
    return sum
def get_f_by_comp(self, V):
    V_ = V[0:V.shape[0] - 1, :]
    v = V[V.shape[0] - 1, :]
    g = inv(dot(transpose(V_), V_))
    a = dot(v, g)
    P = dot(a, transpose(V_))
    return P
def learn_matrix_factorization(self):
    # Initialize Sj and tj; note that Sj is initialized implicitly
    t = [numpy.zeros([self.n, ]) for _ in range(self.J)]  # initialize tj = 0
    for j in range(self.J):
        # Initialize Sj = diag(1/rho_l)
        self.S[j] = diag(1 / self.rho)
    # For i = 1:I, update Phi_i and ui
    for i in range(self.I):
        self.compute_phi(i)  # Compute Phi_i
        self.update_u(i)     # Update ui
        # For j in N(i)
        for j in self.rated_movie_by_user[i]:
            # Sj = Sj + (Phi_i + ui ui') / tau
            self.S[j] = self.S[j] + (self.Phi[i] + outer(self.U[i, :], self.U[i, :])) / self.tau
            # tj = tj + m_ij * ui / tau
            t[j] += self.R[i][j] * self.U[i, :] / self.tau
    # Update sigma
    self.update_sigma()
    # For j = 1:J update Q(vj)
    for j in range(self.J):
        self.Psi[j] = inv(self.S[j])
        self.update_v(j, t)
    # Update tau
    self.update_tau()
def dual_simplex_method2(a, b, c, jb):
    m, n = len(a), len(c)
    ab = a[:, jb]
    ab_inv = linalg.inv(ab)
    y = np.dot([c[i] for i in jb], ab_inv)
    koplan = np.dot(y, a) - c
    if any([i < -eps for i in koplan]):
        return None
    while True:
        kapa_b = np.dot(ab_inv, b)
        if min(kapa_b) > -eps:
            kapa = [0] * n
            for j, i in zip(kapa_b, jb):
                kapa[i] = j
            return kapa, jb, np.dot(c, kapa)
        k = np.argmin(kapa_b)
        j_n = [j for j in range(n) if j not in jb]
        e = np.zeros(m)
        e[k] = 1
        mu = e.dot(ab_inv.dot(a))
        if all(mu[i] >= 0 for i in j_n):
            return None
        s = [np.inf] * n
        for j in j_n:
            if mu[j] < 0:
                s[j] = -koplan[j] / mu[j]
        j0 = np.argmin(s)
        y = y + (s[j0] * e).dot(b)
        koplan = koplan + s[j0] * mu
        jb[k] = j0
        ab_inv = inv_matrix(ab_inv, k, a[:, j0], m)
def mnk(xs: np.ndarray, ys: np.array, tau):
    n = len(xs)
    _xs = np.array([xs[i].tolist() + [1] for i in range(n)])
    ws = np.dot(
        np.dot(linalg.inv(np.dot(_xs.T, _xs) + tau * np.eye(len(_xs[1]))), _xs.T),
        ys)
    return ws
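# Hypothetical usage sketch for mnk (ridge-regularised least squares in the
# normal-equation form above); the 2-D sample data and the tau value are
# illustrative assumptions, not data from the original project.
import numpy as np

xs_demo = np.array([[0.0, 1.0], [1.0, 0.5], [2.0, 2.0], [3.0, 2.5]])  # features
ys_demo = np.array([1.0, 1.5, 3.0, 3.5])                              # targets
ws_demo = mnk(xs_demo, ys_demo, tau=0.1)
print(ws_demo)  # three weights; the last one is the bias term appended by mnk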
def calibrateImages(images):
    for image in images:
        logging.info(f'IMAGE {image.id:02d}: Calibrating image...')
        ins = image.ins
        ex = image.ex
        pmat = ins @ ex
        R = np.array([[ex[0][0], ex[0][1], ex[0][2]],
                      [ex[1][0], ex[1][1], ex[1][2]],
                      [ex[2][0], ex[2][1], ex[2][2]]])
        t = np.array([ex[0][3], ex[1][3], ex[2][3]])
        center = -inv(R) @ t
        center = np.array([center[0], center[1], center[2], 1])
        zaxis = np.array(pmat[2])
        zaxis[3] = 0
        ftmp = norm(zaxis)
        zaxis /= ftmp
        zaxis = np.array([zaxis[0], zaxis[1], zaxis[2]])
        xaxis = np.array([pmat[0][0], pmat[0][1], pmat[0][2]])
        yaxis = cross(zaxis, xaxis)
        yaxis /= norm(yaxis)
        xaxis = cross(yaxis, zaxis)
        image.pmat = pmat
        image.center = center
        image.xaxis = xaxis
        image.yaxis = yaxis
        image.zaxis = zaxis
def calcW(K, W0, xd, NK, m0, XDim, beta0, S):
    Winv = [None for _ in range(K)]
    for k in range(K):
        Winv[k] = inv(W0) + NK[k] * S[k]
        Q0 = reshape(xd[k, :] - m0, (XDim, 1))
        q = dot(Q0, Q0.T)
        Winv[k] += (beta0 * NK[k] / (beta0 + NK[k])) * q
        assert shape(q) == (XDim, XDim)
    W = []
    for k in range(K):
        try:
            W.append(inv(Winv[k]))
        except linalg.linalg.LinAlgError:
            print('Winv[%i]' % k, Winv[k])
            raise
    return W
def fit(self, func, params_dist, pre_X=None, pre_y=None):
    time_start = time.time()
    if self.random_state is not None:
        np.random.seed(self.random_state)
    grid, grid_scaled = self.get_grid(params_dist)
    mu = np.zeros(self.n_grid) + self.mu_prior
    sigma = np.ones(self.n_grid) * self.sigma_prior
    X = np.zeros((self.max_iter, self.n_params))
    X_scaled = np.matrix(np.zeros((self.max_iter, self.n_params)))
    y = np.zeros(self.max_iter)
    if (pre_X is not None) and (pre_y is not None):
        pre_X_mat, pre_X_scaled = self.scale(pre_X, pre_y, params_dist)
        X = np.vstack([pre_X_mat, X])
        X_scaled = np.vstack([pre_X_scaled, X_scaled])
        y = np.concatenate([pre_y, y])
        pre_len = len(pre_y)
    else:
        pre_len = 0
    if self.verbose:
        params_name = [i[:9] for i in self.params_name]
        logger.info('%4s|%9s|%9s|%9s', 'Iter', 'Func', 'Max',
                    '|'.join(['{:9s}'.format(i) for i in params_name]))
    for i in range(pre_len, pre_len + self.max_iter):
        # beta = (i + 1)**2
        if self.beta_mode == 'log':
            d = len(self.params_name)
            beta = 2 * np.log(2 * (i + 1)**2 * np.pi**2 / .3) + \
                2 * d * np.log((i + 1)**2 * d * 2)
        elif self.beta_mode == 'linear':
            beta = i + 1
        elif self.beta_mode == 'square':
            beta = (i + 1)**2
        else:
            logger.error("Unknown beta_mode; expected 'log', 'linear' or 'square'")
        idx = np.argmax(mu + np.sqrt(beta) * sigma)
        X[i, :] = grid[idx]
        X_scaled[i] = grid_scaled[idx]
        y[i] = func(**dict(zip(self.params_name, X[i])))
        KT = self.kernel(X_scaled[:(i + 1)], X_scaled[:(i + 1)]) * \
            self.sigma_prior
        invKT = inv(KT + self.sig**2 * identity(i + 1))
        grid, grid_scaled = self.get_grid(params_dist)
        kT = self.kernel(X_scaled[:(i + 1)], grid_scaled) * \
            self.sigma_prior**2
        mu = self.mu_prior + \
            kT.T.dot(invKT).dot(y[:(i + 1)] - self.mu_prior)
        sigma2 = np.ones(self.n_grid) * self.sigma_prior**2 - \
            diag(kT.T.dot(invKT).dot(kT))
        sigma = np.sqrt(sigma2)
        ### Save Data
        if self.verbose:
            logger.info('%4d|%9.4g|%9.4g|%s', i, y[i], np.max(y[:(i + 1)]),
                        '|'.join(['{:9.4g}'.format(ii) for ii in X[i]]))
        if time.time() - time_start > self.time_budget:
            break
    self.X = X[:(i + 1)]
    self.y = y[:(i + 1)]
    self.mu = mu
    self.beta = beta
    self.sigma = sigma
    self.grid = grid
def getBucklingInfo(self, structureDir):
    poscar = open(structureDir + '/POSCAR', 'r')
    poscarLines = [line.strip() for line in poscar]
    poscar.close()
    contcar = open(structureDir + '/DOS/CONTCAR', 'r')
    contcarLines = [line.strip() for line in contcar]
    contcar.close()
    poscarCounts = [int(count) for count in poscarLines[5].strip().split()]
    oldCpos = []
    for line in poscarLines[7:7 + poscarCounts[0]]:
        stringPos = line.split()
        floatPos = [float(comp) for comp in stringPos]
        oldCpos.append(floatPos)
    contcarCounts = [int(count) for count in contcarLines[6].strip().split()]
    newCpos = []
    for line in self.relaxedPositions[:contcarCounts[0]]:
        newCpos.append(line)
    NNpairs = self.getNNPairs(oldCpos)
    buckleDistances = []
    for pair in NNpairs:
        ind1 = pair[0]
        ind2 = pair[1]
        pos1 = newCpos[ind1]
        pos2 = newCpos[ind2]
        zpos1 = pos1[2]
        trialPos2 = []
        pos2Dir = dot(inv(transpose(self.relaxedVecs)), transpose(pos2))
        for i in [-1, 0, 1]:
            for j in [-1, 0, 1]:
                for k in [-1, 0, 1]:
                    newPos = [pos2Dir[0] + i, pos2Dir[1] + j, pos2Dir[2] + k]
                    newCartPos = dot(transpose(self.relaxedVecs), transpose(newPos))
                    trialPos2.append(newCartPos)
        trialDistances = []
        for pos in trialPos2:
            zpos2 = pos[2]
            distance = abs(zpos1 - zpos2)
            trialDistances.append(distance)
        buckleDistances.append(min(trialDistances))
    return sum(buckleDistances) / len(buckleDistances)
def inv0(X):
    # Inverts X if it is a matrix; otherwise falls back to the scalar reciprocal
    try:
        Xm1 = inv(X)
        return Xm1
    except IndexError:
        # print 'reverting to 1D for inverse matrix'
        return 1 / float(X)
def move_vec(self, vec):
    vec = np.array(vec)
    vec = vec * 0.001
    vec.resize(4, refcheck=False)
    vec = la.dot(la.inv(np.reshape(self.currmv, (4, 4))), vec)
    self.translate(*vec[0:3])
    return vec[0:3]
def print_matrix_with_meta_data(filename, matrix):
    np.save(os.path.join(DATA_DIR, filename), matrix)
    details_file = open(os.path.join(DATA_DIR, filename + '.txt'), 'w')
    try:
        details_file.write(np.array2string(matrix) + "\n")
        details_file.write("\ndeterminant: " + str(linalg.det(matrix)) + "\n")
        details_file.write("inverse matrix determinant: " + str(linalg.det(linalg.inv(matrix))) + "\n")
        details_file.write("euclidean norm: " + str(linalg.norm(matrix, 'fro')) + "\n")
        details_file.write("euclidean norm \nof inverse matrix: " + str(linalg.norm(linalg.inv(matrix), 'fro')) + "\n")
        details_file.write("condition number: " + str(linalg.cond(matrix, 'fro')))
    finally:
        details_file.close()
def invertHomography(homography):
    """Returns an inverted homography
    Unnecessary for reprojection over camera image"""
    from numpy.linalg.linalg import inv
    invH = inv(homography)
    invH /= invH[2, 2]
    return invH
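# Hedged usage sketch for invertHomography: the 3x3 matrix below is an
# arbitrary example homography, not data from the original project.
import numpy as np

H_demo = np.array([[1.0, 0.2, 5.0],
                   [0.1, 1.0, 3.0],
                   [0.0, 0.001, 1.0]])
Hinv_demo = invertHomography(H_demo)
# After normalisation the bottom-right entry equals 1 by construction.
assert abs(Hinv_demo[2, 2] - 1.0) < 1e-12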
def preconditioned_conjugate_gradient(A, x0, b, preconditioner, max_iterations):
    # Calculate the FFT of the first column of the circulant of A
    gA = precompute_g(A)
    # Store initial solution
    x = x0
    # Calculate initial residual
    residual = b - fftmul(gA, x0)
    # Norm of the initial residual
    norm_residual0 = norm(residual, ord=2)
    # Calculate M based on the preconditioner and get its inverse
    Minv = inv(dot(preconditioner, preconditioner.T))
    # Initial z vector
    z = dot(Minv, residual)
    # z^T * r
    zr = inner(z, residual)
    # Calculate initial direction
    direction = z
    for i in range(max_iterations):
        if i % 10000 == 0:
            logging.info('Preconditioned Conjugate Gradient iteration {}, dimension {}'.format(i, A.shape[0]))
        # If the residual is small enough, terminate the algorithm
        if norm(residual, ord=2) / norm_residual0 < 0.0000001:
            logging.info('Preconditioned Conjugate Gradient converged after {} iterations'.format(i))
            break
        # Store previous residual, direction and z
        old_residual = residual
        old_direction = direction
        old_z = z
        old_zr = zr
        # Calculate the new step size
        Ar = fftmul(gA, old_direction)
        a = old_zr / inner(old_direction, Ar)
        # Update solution
        x = x + a * old_direction
        # Update residual
        residual = old_residual - a * Ar
        z = dot(Minv, residual)
        # Update direction
        zr = inner(z, residual)
        b = zr / old_zr
        direction = z + b * old_direction
    return x, i
def get_f(self, r):
    if r > self.R:
        r = self.R
    V_ = self.V[0:self.V.shape[0] - 1, 0:r]
    v = self.V[self.V.shape[0] - 1, 0:r]
    g = inv(dot(transpose(V_), V_))
    a = dot(v, g)
    P = dot(a, transpose(V_))
    return P
def mahalanobis_distance(difference, num_random_features):
    num_samples, _ = np.shape(difference)
    sigma = np.cov(np.transpose(difference))
    mu = np.mean(difference, 0)
    if num_random_features == 1:
        stat = float(num_samples * mu ** 2) / float(sigma)
    else:
        try:
            linalg.inv(sigma)
        except LinAlgError:
            print('covariance matrix is singular. Pvalue returned is 1.1')
            warnings.warn('covariance matrix is singular. Pvalue returned is 1.1')
            return 0
        stat = num_samples * mu.dot(linalg.solve(sigma, np.transpose(mu)))
    return chi2.sf(stat, num_random_features)
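# Illustrative call to mahalanobis_distance on synthetic data; the sample
# size, dimension, and random differences below are assumptions for the sketch.
import numpy as np

rng = np.random.RandomState(0)
difference_demo = rng.randn(200, 5)  # 200 samples, 5 random features
p_value = mahalanobis_distance(difference_demo, num_random_features=5)
print(p_value)  # a large p-value is expected when the mean difference is ~0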
def transform(tfmat, vert, vert_frame_mat=np.eye(4), tf_frame_mat=None, out_frame_mat=None):
    """
    Most general form of applying a transformation matrix.
    Apply transformation matrix tfmat on vertices with coordinates specified in
    vert_frame (current/input frame of reference).
    Apply the transformation in the coordinate frame given by tf_frame.
    Output the vertices in out_frame.
    #1. Bring vertices to world frame
        vert = vert_frame.to_world(vert)
    #2. Bring vertices to tf frame
        vert = tf_frame.from_world(vert)
    #3. Apply the transformation matrix in tf frame
        vert = apply_matrix(tfmat, vert)
    #4. Bring vertices to world frame
        vert = tf_frame.to_world(vert)
    #5. Bring vertices to out frame
        vert = out_frame.from_world(vert)
    """
    # If the output frame is not specified, set it to the input frame.
    if out_frame_mat is None:
        out_frame_mat = vert_frame_mat
    # If the frame of reference for the transformation is not specified,
    # perform the transform in the vertex frame of reference.
    if tf_frame_mat is None:
        tf_frame_mat = vert_frame_mat
    if isinstance(vert_frame_mat, CoordFrame):
        vert_frame_mat = vert_frame_mat.m
    if isinstance(tf_frame_mat, CoordFrame):
        tf_frame_mat = tf_frame_mat.m
    if isinstance(out_frame_mat, CoordFrame):
        out_frame_mat = out_frame_mat.m
    # Ensure 4x4 matrices
    vert_frame_mat = m4(vert_frame_mat)
    tf_frame_mat = m4(tf_frame_mat)
    out_frame_mat = m4(out_frame_mat)
    tfmat = m4(tfmat)
    mat = inv(out_frame_mat) @ tf_frame_mat @ tfmat @ inv(tf_frame_mat) @ vert_frame_mat
    return apply_matrix(mat, vert)
def regularised_ml_weights(inputmtx, targets, reg_coeff):
    """
    This method returns the regularised weights that give the best linear fit
    between the processed inputs and the targets.
    """
    Phi = np.matrix(inputmtx)
    targets = np.matrix(targets).reshape((len(targets), 1))
    I = np.identity(Phi.shape[1])
    weights = linalg.inv(I * reg_coeff + Phi.transpose() * Phi) * Phi.transpose() * targets
    return np.array(weights).flatten()
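# Minimal sketch of calling regularised_ml_weights (ridge regression via the
# regularised normal equations above); the design matrix and targets are made up.
import numpy as np

Phi_demo = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # bias column + x
targets_demo = np.array([0.1, 1.1, 1.9, 3.2])
weights_demo = regularised_ml_weights(Phi_demo, targets_demo, reg_coeff=0.01)
print(weights_demo)  # roughly recovers intercept ~0 and slope ~1 for this near-linear data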
def transformYIQ2RGB(imgYIQ: np.ndarray) -> np.ndarray:
    """
    Converts a YIQ image to RGB color space
    :param imgYIQ: an image in YIQ color space
    :return: the image in RGB color space
    """
    transform = np.array([[0.299, 0.587, 0.114],
                          [0.596, -0.275, -0.321],
                          [0.212, -0.523, 0.311]])
    inverse_mat = linalg.inv(transform)  # invert the RGB->YIQ transform matrix
    res = np.dot(imgYIQ, inverse_mat.T)
    return res
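# Small sanity-check sketch for transformYIQ2RGB: converting RGB -> YIQ with the
# same matrix and back should approximately recover the input; the pixel value
# is an invented example.
import numpy as np

rgb_demo = np.array([[[0.2, 0.4, 0.6]]])  # a single made-up RGB pixel, shape (1, 1, 3)
yiq_matrix = np.array([[0.299, 0.587, 0.114],
                       [0.596, -0.275, -0.321],
                       [0.212, -0.523, 0.311]])
yiq_demo = np.dot(rgb_demo, yiq_matrix.T)
recovered = transformYIQ2RGB(yiq_demo)
assert np.allclose(recovered, rgb_demo, atol=1e-6)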
def pdf(self):
    """ Partially applied Gaussian pdf """
    dim = self.mean.shape[0]
    const = 1 / (((2 * numpy.pi) ** (dim / 2.0)) * (det(self.cov) ** 0.5))
    inv_cov = inv(self.cov)

    def gauss_pdf(x):
        sub = x - self.mean
        exponent = -0.5 * sub.T * inv_cov * sub
        if (numpy.shape(exponent) != (1, 1)):
            raise AssertionError
        return const * (numpy.e ** exponent[0, 0])
    return gauss_pdf
def getOriginalPositions(self, structureDir):
    # These are going to be in Cartesian coordinates
    fullStructPath = os.path.abspath(structureDir)
    poscar = open(fullStructPath + '/POSCAR', 'r')
    poscarLines = poscar.readlines()
    poscar.close()
    stringvecs = poscarLines[2:5]
    stringvecs = [line.strip().split() for line in stringvecs]
    self.origVecs = []
    for vec in stringvecs:
        newvec = [float(comp) for comp in vec]
        self.origVecs.append(newvec)
    counts = poscarLines[5].strip().split()
    counts = [int(count) for count in counts]
    self.structureInfo = []
    self.structureInfo.append(counts[0] / 2)
    self.structureInfo.append(counts)
    total = sum(counts)
    positionLines = poscarLines[7:7 + total]
    # Convert to direct coordinates in terms of the !! NEW !! lattice vectors
    # and then back to Cartesian coordinates.
    positions = []
    for line in positionLines:
        newStringPosition = line.strip().split()
        newPosition = [float(comp) for comp in newStringPosition]
        directs = []
        r = dot(inv(transpose(self.relaxedVecs)), transpose(newPosition))  # Change to direct coordinates.
        for i in [-1, 0, 1]:
            for j in [-1, 0, 1]:
                for k in [-1, 0, 1]:
                    directs.append([r[0] + i, r[1] + j, r[2] + k])
        carts = []
        for pos in directs:
            rnew = dot(transpose(self.relaxedVecs), transpose(pos))  # Change back to Cartesian coordinates.
            carts.append(rnew)
        positions.append(carts)
    return positions
def apply_load(self, F, dT):
    '''
    Obtain strains of the stacking due to loading
    F = [n_x, n_y, n_xy, m_x, m_y, m_xy] in N/m
    :param F: force vector
    :param dT: delta temperature
    :return: eps0: vector of strains due to normal loads eps0 = [eps_x, eps_y, eps_xy]
    :return: eps1: vector of strains due to bending loads eps1 = [eps_x, eps_y, eps_xy]
    '''
    # Reddy, Eq. 3.3.40
    eps = np.dot(inv(self.ABD), (F + self.QLAL * dT))
    eps0 = eps[0:3]
    eps1 = eps[3:6]
    return eps0, eps1
def epComputeParams(self, K, KI, g):
    """calculate the EP parameters
    K: plain kernel matrix
    g: [0,1]: natural parameter rep. [2]: 0. moment for lml
    """
    # inverse of EP kernel matrix
    KepI = SP.diag(g[:, 1])
    Sigma = linalg.inv(KI + KepI)
    # however g[:,0] = mu/var... so that's all we need
    mu = SP.dot(Sigma, g[:, 0])
    # TODO: implement lml
    lml = 0
    return [Sigma, mu, lml]
def update_w(w0, nk, sk, xk, m0, beta0, betak):
    """
    :param w0: [dim, dim]
    :param nk: [K,]
    :param sk: [K, dim, dim]
    :param xk: [K, dim]
    :param m0: [1, dim]
    :param beta0: scalar
    :param betak: [K,]
    :return: w, w_inv [K, dim, dim]
    """
    diff = xk - m0  # [K, dim]
    diff = np.expand_dims(diff, axis=-1)  # [K, dim, 1]
    diff = np.einsum('kin,kjn->kij', diff, diff)  # [K, dim, dim]
    prefactor = beta0 * np.divide(nk, betak)  # [K,]
    prefactor = np.expand_dims(prefactor, axis=-1)
    prefactor = np.expand_dims(prefactor, axis=-1)  # [K, 1, 1]
    diff = np.multiply(prefactor, diff)  # [K, dim, dim]
    nk = np.expand_dims(nk, axis=-1)
    nk = np.expand_dims(nk, axis=-1)  # [K, 1, 1]
    w0_inv = np.expand_dims(inv(w0), axis=0)  # [1, dim, dim]
    w_inv = np.multiply(nk, sk) + diff + w0_inv  # [K, dim, dim]
    K = w_inv.shape[0]
    w = np.zeros(w_inv.shape)
    # Invert (safely)
    for k in range(K):
        try:
            w[k, :, :] = inv(w_inv[k, :, :])
        except linalg.linalg.LinAlgError:
            print('w_inv = ', w_inv[k, :, :])
            raise
    return w, w_inv
def method_gomory(a, b, c):
    f_prev = np.inf
    iter = 0
    n_p = len(c)
    sol = solve_lp(a, b, c)
    while (sol):
        m, n = len(b), len(c)
        x, jb, f = sol
        print(f'\niter {iter}\nx {x}\njb {jb}\nf {f}')
        if n > n_p + 3:
            j_r = [j for j in jb if j >= n_p]
            if j_r:
                print(f'delete, j {j_r}')
                a, b, c, jb = correct_task(a, b, c, jb, j_r)
                m, n = len(b), len(c)
        j_k = get_j_k(f_prev, f, x, [j for j in jb if j < n_p])
        print('j_k', j_k)
        if j_k == -1:
            print(f'\nsol\nx {x[:n_p]}\nf {f}')
            return
        k = jb.index(j_k)
        ab_inv = linalg.inv(a[:, jb])
        print('ab_inv', ab_inv)
        e = np.zeros(m)
        e[k] = 1
        y = np.dot(e, ab_inv)
        print('y', y)
        a_j = np.dot(y, a)
        print('a_j', a_j)
        betta = np.dot(y, b)
        print('betta', betta)
        f_j = [-(i - math.floor(i)) if not is_integer(i) else 0 for i in a_j]
        # f_j = [-(i % 1) if not is_integer(i) else 0 for i in a_j]
        if all([f_j[i] > -eps for i in range(n) if i not in jb]):
            print('no solution')
            return
        e = np.zeros((m + 1, 1))
        e[m] = 1
        a = np.vstack([a, f_j])
        a = np.hstack([a, e])
        b.append(-(betta - math.floor(betta)))
        c.append(0)
        f_prev = f
        jb.append(n)
        sol = dual_simplex_method2(a, b, c, jb)
        iter += 1
def computeCenter(patch, cell):
    image = cell.image
    x = np.array([cell.center[0], cell.center[1], 1])
    R = np.array([[image.pmat[0][0], image.pmat[0][1], image.pmat[0][2]],
                  [image.pmat[1][0], image.pmat[1][1], image.pmat[1][2]],
                  [image.pmat[2][0], image.pmat[2][1], image.pmat[2][2]]])
    t = np.array([image.pmat[0][3], image.pmat[1][3], image.pmat[2][3]])
    X = inv(R) @ (x - t)
    X = np.array([X[0], X[1], X[2], 1])
    vect = X - image.center
    t = -(patch.normal @ X - patch.normal @ patch.center) / (patch.normal @ vect)
    return X + t * vect
def pdf_mat(self):
    """ Return a partially applied Gaussian pdf that takes in a matrix
    whose columns are the input vectors"""
    dim = self.mean.shape[0]
    const = 1 / (((2 * numpy.pi) ** (dim / 2.0)) * (det(self.cov) ** 0.5))
    inv_cov = inv(self.cov)

    def gauss_pdf_mat(x):
        """Partially applied Gaussian pdf that takes in a matrix whose
        columns are the input vectors"""
        sub = x - self.mean
        r0 = inv_cov * sub
        exponent = -0.5 * numpy.sum(sub.A * r0.A, axis=0)
        if (numpy.shape(exponent) != (x.shape[1],)):
            raise AssertionError(
                "exponent has the wrong shape, should be (%d,), but is (%d,)"
                % (x.shape[1], exponent.shape[0]))
        g = const * (numpy.e ** exponent)
        return g
    return gauss_pdf_mat
def main():
    n = 3
    A = [[1, 0, 5], [2, 1, 6], [3, 4, 0]]  # the given matrix
    x = [2, 2, 2]  # the given column x
    i = 1  # index of the column of the matrix that will be replaced by the given column x
    A_inv = linalg.inv(A)  # The inverse of A is also given; for simplicity it is computed here rather than typed in
    # The task is the following:
    # Suppose the i-th column of matrix A is replaced by the column x. Call the resulting matrix B.
    # Two questions need to be answered:
    # 1. Is the matrix B invertible (i.e. does it have an inverse)?
    # 2. If it is invertible, find the inverse.
    method(A, A_inv, i, x, n)  # the method itself
def resample_roi_array(target_affine: np.ndarray, target_shape: tuple,
                       source_affine: np.ndarray, source_shape: tuple,
                       source_data: np.ndarray) -> np.ndarray:
    """Resamples source ROI array with affine matrices of both source space and target space.

    Since the transformation is performed by matrix multiplications, each target
    coordinate (i, j, k) is rounded to `int` type. It is probably acceptable for ROI
    data or other discrete data, but rather questionable for continuous data.

    Args:
        target_affine (np.ndarray): Affine matrix of target space.
        target_shape (tuple): Shape of target.
        source_affine (np.ndarray): Affine matrix of source space.
        source_shape (tuple): Shape of source.
        source_data (np.ndarray): Source data.

    Returns:
        np.ndarray: Output in target shape.
    """
    from numpy.linalg import linalg
    output_array = np.zeros(target_shape)
    output_array[:, :, :] = np.nan
    imax, jmax, kmax = target_shape
    imesh, jmesh, kmesh = np.meshgrid(np.arange(imax), np.arange(jmax), np.arange(kmax))
    imesh, jmesh, kmesh = imesh.flatten(), jmesh.flatten(), kmesh.flatten()
    coordinates = np.row_stack((imesh, jmesh, kmesh, np.ones([1, imax * jmax * kmax])))
    linear_transform = np.matmul(linalg.inv(source_affine), target_affine)
    new_coordinates = np.matmul(linear_transform, coordinates)  # Apply transformation
    new_coordinates = np.round(new_coordinates[:3]).astype(int)  # Remove axis 3, retaining axes 0, 1, and 2
    # Remove indices lower than 0 or exceeding the original data shape on any axis.
    # A legal 3-D coordinate should satisfy the following requirements:
    # 1. Not lower than 0;
    # 2. Not exceeding the shape of the source data
    legal = np.where(
        np.sum((new_coordinates >= 0) *
               (new_coordinates < np.array([source_shape]).T),
               axis=0) == 3)  # `legal` is a tuple
    output_array[(imesh[legal], jmesh[legal], kmesh[legal])] = source_data[tuple(new_coordinates[:, legal[0]])]
    return output_array
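# Hedged example for resample_roi_array: resampling a toy ROI volume onto a
# coarser target grid. Both affines, the shapes, and the ROI cube are invented
# for illustration only.
import numpy as np

source_shape_demo = (4, 4, 4)
source_affine_demo = np.eye(4)                      # 1 mm isotropic voxels
source_data_demo = np.zeros(source_shape_demo)
source_data_demo[1:3, 1:3, 1:3] = 1                 # a small cubic ROI

target_shape_demo = (2, 2, 2)
target_affine_demo = np.diag([2.0, 2.0, 2.0, 1.0])  # 2 mm isotropic voxels

resampled = resample_roi_array(target_affine_demo, target_shape_demo,
                               source_affine_demo, source_shape_demo,
                               source_data_demo)
print(resampled.shape)  # (2, 2, 2)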
def findObject(frameNum, x, y):
    import cv2
    from numpy import array
    from numpy.core import dot
    from numpy.lib.function_base import append
    from numpy.linalg.linalg import inv
    from numpy import loadtxt
    homographyFilename = "laurier-homography.txt"
    homography = inv(loadtxt(homographyFilename))
    databaseFilename = "laurier.sqlite"
    trajectoryType = "object"
    cap = cv2.VideoCapture('laurier.avi')
    width = cap.get(3)
    height = cap.get(4)
    cap.release()
    objects = storage.loadTrajectoriesFromSqlite(databaseFilename, trajectoryType)
    features = storage.loadTrajectoriesFromSqlite(databaseFilename, "feature")
    px = 0.2
    py = 0.2
    pixelThreshold = 800
    for obj in objects:
        if obj.existsAtInstant(frameNum):
            obj.setFeatures(features)
            if obj.hasFeatures():
                u = []
                v = []
                for f in obj.getFeatures():
                    if f.existsAtInstant(frameNum):
                        projectedPosition = f.getPositionAtInstant(frameNum).project(homography)
                        u.append(projectedPosition.x)
                        v.append(projectedPosition.y)
                xmin = min(u)
                xmax = max(u)
                ymin = min(v)
                ymax = max(v)
                xMm = px * (xmax - xmin)
                yMm = py * (ymax - ymin)
                a = max(ymax - ymin + (2 * yMm), xmax - (xmin + 2 * xMm))
                yCropMin = int(max(0, .5 * (ymin + ymax - a)))
                yCropMax = int(min(height - 1, .5 * (ymin + ymax + a)))
                xCropMin = int(max(0, .5 * (xmin + xmax - a)))
                xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
                if yCropMax != yCropMin and xCropMax != xCropMin and (yCropMax - yCropMin) * (xCropMax - xCropMin) > pixelThreshold:
                    if x > xCropMin and x < xCropMax and y > yCropMin and y < yCropMax:
                        print(obj.getNum())
def calc_equivalent_modulus(self):
    """Calculates the equivalent laminate properties.

    The following attributes are calculated:
        e1, e2, g12, nu12, nu21
    """
    if not self.lam3D:
        AI = np.matrix(self.ABD, dtype=DOUBLE).I
        a11, a12, a22, a33 = AI[0, 0], AI[0, 1], AI[1, 1], AI[2, 2]
        self.e1 = 1. / (self.t * a11)
        self.e2 = 1. / (self.t * a22)
        self.g12 = 1. / (self.t * a33)
        self.nu12 = -a12 / a11
        self.nu21 = -a12 / a22
        # Eq. 5.110 Ganesh/Rana Lecture 19 Hygrothermal laminate theory
        # or Eq. 4.72 into Eq. 4.64 with delta_T=1 (Kaw 2006)
        a = np.squeeze(np.array(np.dot(AI, self.QLAL)))
        self.a1 = a[0]
        self.a2 = a[1]
        self.a12 = a[2]
    else:
        H = inv(self.C_general)         # Bogetti 1995 Eq. 29
        self.e1 = 1. / H[0, 0]          # Bogetti 1995 Eq. 30
        self.e2 = 1. / H[1, 1]          # Bogetti 1995 Eq. 31
        self.e3 = 1. / H[2, 2]          # Bogetti 1995 Eq. 32
        self.g23 = 1. / H[3, 3]         # Bogetti 1995 Eq. 33
        self.g13 = 1. / H[4, 4]         # Bogetti 1995 Eq. 34
        self.g12 = 1. / H[5, 5]         # Bogetti 1995 Eq. 35
        self.nu23 = -H[1, 2] / H[1, 1]  # Bogetti 1995 Eq. 36
        self.nu13 = -H[0, 2] / H[0, 0]  # Bogetti 1995 Eq. 37
        self.nu12 = -H[0, 1] / H[0, 0]  # Bogetti 1995 Eq. 38
        self.nu32 = -H[1, 2] / H[2, 2]  # Bogetti 1995 Eq. 39
        self.nu31 = -H[0, 2] / H[2, 2]  # Bogetti 1995 Eq. 40
        self.nu21 = -H[0, 1] / H[1, 1]  # Bogetti 1995 Eq. 41
        N = self.N
        self.a1 = np.dot(H[0, :], N)    # Bogetti Eq. 44
        self.a2 = np.dot(H[1, :], N)    # Bogetti Eq. 45
        self.a3 = np.dot(H[2, :], N)    # Bogetti Eq. 46
        self.a23 = np.dot(H[3, :], N)   # Bogetti Eq. 47
        self.a13 = np.dot(H[4, :], N)   # Bogetti Eq. 48
        self.a12 = np.dot(H[5, :], N)   # Bogetti Eq. 49
def get_synthetic_data(n_samples, n_features, precision_matrix=None, alpha=0.98, seed=1):
    """
    Generate synthetic data using a covariance matrix obtained by inverting a
    randomly generated precision matrix.

    Args:
        n_samples (int): number of samples to draw.
        n_features (int): number of features (dimensionality of each sample).
        precision_matrix (np.ndarray, optional): precision matrix to use; if None,
            a sparse SPD matrix is generated. Defaults to None.
        alpha (float, optional): sparsity of the generated precision matrix
            (probability that a coefficient is zero). Defaults to 0.98.
        seed (int, optional): random seed. Defaults to 1.

    Returns:
        tuple: a tuple with two elements. The first is a pd.DataFrame representing
        the data. The second is the precision matrix used to generate the data.
    """
    prng = np.random.RandomState(seed)
    if precision_matrix is None:
        prec = make_sparse_spd_matrix(n_features, alpha=alpha,
                                      smallest_coef=.1,
                                      largest_coef=.9,
                                      random_state=prng)
    else:
        prec = precision_matrix
    cov = linalg.inv(prec)
    d = np.sqrt(np.diag(cov))
    cov /= d
    cov /= d[:, np.newaxis]
    prec *= d
    prec *= d[:, np.newaxis]
    X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
    X -= X.mean(axis=0)
    X /= X.std(axis=0)
    X = pd.DataFrame(X)
    X.columns = ["gene" + str(i) for i in X.columns]
    X.index = ["sample" + str(i) for i in X.index]
    return X, prec
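# Possible usage of get_synthetic_data; the sizes, alpha, and seed below are
# arbitrary illustrative choices.
X_demo, prec_demo = get_synthetic_data(n_samples=100, n_features=10, alpha=0.95, seed=42)
print(X_demo.shape)   # (100, 10), rows "sample0".."sample99", columns "gene0".."gene9"
print(prec_demo.shape)  # (10, 10) precision matrix used to generate the data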
def gaussian_mvn_pdf(X, mean, cov):
    '''
    Return posterior probabilities approximated by a Gaussian with provided
    mean and covariance.
    Params:
        X: Data to be classified (Dx1)
        mean: Mean vector of the data (Dx1)
        cov: Covariance matrix of the data (DxD)
    Returns:
        p: posterior probabilities
    '''
    D = det(cov)
    inv_cov = inv(cov)
    X_shift = X - mean
    p_1 = 1 / (((2 * np.pi) ** (len(mean) / 2)) * (D ** (1 / 2)))
    p_2 = (-1 / 2) * ((X_shift.T) @ (inv_cov) @ (X_shift))
    prior_prob = p_1 * np.exp(p_2)
    return prior_prob
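# Quick check of gaussian_mvn_pdf against the known peak density of a standard
# 2-D Gaussian; the test point and parameters are illustrative only.
import numpy as np

mean_demo = np.zeros(2)
cov_demo = np.eye(2)
p_demo = gaussian_mvn_pdf(np.zeros(2), mean_demo, cov_demo)
print(p_demo)  # ~1 / (2 * pi) ≈ 0.1592 at the mean of a standard bivariate normal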
def ellipse(self, u, v, Marginal=False, M=500):
    '''In a 2-d subspace defined by vectors u and v and given covariance in
    terms of self.cov_vecs and self.cov_vals, calculate and return sets of
    coefficients (x, y) that satisfy <x_i u, \Sigma^{-1} y_i v> = 1.

    Default: Use the conditional distribution in the subspace.
    Alternative with Marginal=True: Use the marginal distribution in the subspace.
    '''
    if Marginal:
        # First use the spectral decomposition
        t = np.dot(np.array((u, v)), self.cov_vecs.T)  # 2xL
        Q = np.dot(self.cov_vals.flatten() * t, t.T)   # covariance of u and v
        QI = LA.inv(Q)
        '''The above is (E * W)^T * Lambda * E * W where W has columns u and v
        and the rows of E are the eigenvectors.  Next I verify that the
        following is equivalent:

        t = np.array((u, v))
        Q = self.inner(t, self.inner(self.Cov, t.T))
        '''
        t = np.array((u, v))
        Q = self.inner(t, self.inner(self.Cov, t.T))
        error = np.eye(2) - np.dot(Q, QI)
        assert np.max(np.abs(error)) < 1e-10
        a = QI[0, 0]
        b = QI[0, 1]
        c = QI[1, 1]
    else:
        # Use the conditional variance
        def iqf(f, g):
            """ Inverse quadratic form <f, \Sigma^{-1} g> """
            # get f and g in the diagonal basis
            t = np.dot(np.array((f, g)), self.cov_vecs)
            return np.dot(t[0], t[1] / self.cov_vals.flatten())
        a = iqf(u, u)
        b = iqf(u, v)
        c = iqf(v, v)
    step = 2 * np.pi / M
    theta = np.arange(0, 2 * np.pi + 0.5 * step, step)
    sin = np.sin(theta)
    cos = np.cos(theta)
    rr = 1 / (a * cos * cos + 2 * b * cos * sin + c * sin * sin)
    r = np.sqrt(rr)
    return (r * cos, r * sin)
def fit_model(self):
    # N_stations = len(self.stations)
    N_stations = len(self.stat_select[:])
    N_times = len(self.times)
    N_sources = len(self.sources)
    N_pol = min(len(self.polarizations), 2)
    G = kron(eye(N_sources), (eye(N_stations) - ones((N_stations, N_stations)) / N_stations))
    if 'TECfit' in self.hdf5.root:
        self.hdf5.root.TECfit.remove()
    self.TECfit = self.hdf5.createArray(self.hdf5.root, 'TECfit', zeros(self.TEC[:].shape))
    if 'TECfit_white' in self.hdf5.root:
        self.hdf5.root.TECfit_white.remove()
    self.TECfit_white = self.hdf5.createArray(self.hdf5.root, 'TECfit_white', zeros(self.TEC[:].shape))
    self.offsets = zeros((len(self.n_list), N_pol))
    p = ProgressBar(len(self.n_list), "Fitting phase screen: ")
    za = self.piercepoints.cols.zenith_angles[:]
    for i in range(len(self.n_list)):
        p.update(i)
        U = self.U_list[i]
        S = self.S_list[i]
        for pol in range(N_pol):
            # print self.TEC[self.n_list[i], :, :, pol].shape
            TEC = multiply(self.TEC[:][self.n_list[i], self.stat_select[:], :, pol].swapaxes(0, 1),
                           cos(za[i, :, :])).reshape((N_sources * N_stations, 1))
            TECfit = dot(U, dot(inv(dot(U.T, dot(G, U))), dot(U.T, dot(G, TEC))))
            TECfit_white = dot(U, dot(diag(1 / S), dot(U.T, TECfit)))
            self.offsets[i, pol] = TECfit[0] - dot(self.C_list[i][0, :], TECfit_white)
            TECfit = reshape(TECfit, (N_sources, N_stations)).swapaxes(0, 1)
            TECfit_white = reshape(TECfit_white, (N_sources, N_stations)).swapaxes(0, 1)
            for istat, stat in enumerate(self.stat_select[:]):
                self.TECfit[i, stat, :, pol] = TECfit[istat, :]
                self.TECfit_white[i, stat, :, pol] = TECfit_white[istat, :]
    p.finished()
    self.TECfit_white.attrs.r_0 = self.r_0
    self.TECfit_white.attrs.beta = self.beta
    if 'offsets' in self.hdf5.root:
        self.hdf5.root.offsets.remove()
    self.hdf5.createArray(self.hdf5.root, 'offsets', self.offsets)
def _enforce(self, q_warm):
    self.converged = False
    self.robot.rave.SetDOFValues(q_warm)
    self.robot.rave.SetActiveDOFs(self.active_indexes)
    q_max = array([DOF_SCALE * dof.ulim for dof in self.active_dofs])
    q_min = array([DOF_SCALE * dof.llim for dof in self.active_dofs])
    I = eye(self.nb_active_dof)
    q = full_to_active(q_warm, self.active_dofs)
    self.robot.rave.SetActiveDOFValues(q)
    for itnum in range(self.max_iter):
        conv_vect = array([norm(task.f()) for task in self.tasks])
        if numpy.all(conv_vect < self.conv_thres):
            self.converged = True
            break
        if DEBUG_IK:
            conv = ["%10.8f" % x for x in conv_vect]
            print(" %4d: %s" % (itnum, ' '.join(conv)))
        ker_proj = eye(self.nb_active_dof)
        dq = zeros(self.nb_active_dof)
        qd_max_reg = self.gain * (q_max - q)
        qd_min_reg = self.gain * (q - q_min)
        for i, task in enumerate(self.tasks):
            J = task.J()
            Jn = dot(J, ker_proj)
            b = -self.gain * task.f() - dot(J, dq)
            In = eye(Jn.shape[0])
            sr_inv = dot(Jn.T, linalg.inv(dot(Jn, Jn.T) + 1e-8 * In))
            dq += dot(sr_inv, b)
            ker_proj = dot(ker_proj, I - dot(linalg.pinv(Jn), Jn))
        qd_max_reg = self.gain * (q_max - q)
        qd_min_reg = self.gain * (q - q_min)
        q += solve_ineq(I, dq, I, qd_max_reg, qd_min_reg)
        self.robot.rave.SetActiveDOFValues(q)
    return self.robot.rave.GetDOFValues()
def main():
    A = np.array([[4, 3, 4, 10], [2, -7, 3, 0], [-2, 11, 1, 3], [3, -4, 0, 2]], dtype=np.float64)
    print("\nInverse matrix calculated by the NumPy API:")
    AinvNpy = linalg.inv(A)
    print("NumPy: A . Ainv = \n", np.dot(A, AinvNpy))
    print("NumPy: Ainv = \n", AinvNpy)
    print("\nInverse matrix calculated by the Gauss method:")
    AinvGauss = inverseGauss(A)
    print("Gauss: A . Ainv = \n", np.dot(A, AinvGauss))
    print("Gauss: Ainv = \n", AinvGauss)
    print("\nInverse matrix calculated by the Newton method:")
    AinvNewton, error, nIter = inverseNewton(A)
    print("Newton: A . Ainv = \n", np.dot(A, AinvNewton))
    print("Newton: Ainv = \n", AinvNewton)
    print("Number of iterations: ", nIter)
    print("Error from NumPy API results: ", error)
    print("\nDone!")
def test_simple_array_operations(self):
    a = array([[1., 2.], [3., 4.]])
    numpy.testing.assert_array_equal(a.transpose(), array([[1., 3.], [2., 4.]]))
    numpy.testing.assert_array_almost_equal(trace(a), 5)
    inv_a = inv(a)
    b = array([[-2., 1.], [1.5, -.5]])
    self.assertTrue(numpy.allclose(inv_a, b))
    i = dot(a, inv_a)
    numpy.testing.assert_array_almost_equal(i, eye(2), 1)
    numpy.testing.assert_array_almost_equal(inv_a, b)
    # system of linear equations
    a = array([[3, 2, -1], [2, -2, 4], [-1, 0.5, -1]])
    b = array([1, -2, 0])
    c = solve(a, b)
    d = dot(a, c)
    numpy.testing.assert_array_almost_equal(b, d, 1)
    a = array([[.8, .3], [.2, .7]])
    eigen_values, eigen_vectors = eig(a)
    lambda_1 = eigen_values[0]
    x_1 = eigen_vectors[:, 0]
    lambda_2 = eigen_values[1]
    x_2 = eigen_vectors[:, 1]
def GPUCB(func=f, kernel=DoubleExponential,
          params_dist={'x': Uniform(start=0, end=5)},
          prev_X=None, prev_y=None,
          sig=.1, mu_prior=0, sigma_prior=1,
          n_grid=100, n_iter=10, seed=2, time_budget=36000):
    time_start = time.time()
    np.random.seed(seed)
    n_params = len(params_dist)
    params_name = params_dist.keys()
    grid, grid_scaled = GetRandGrid(n_grid, params_dist)
    mu = np.zeros(n_grid) + mu_prior
    sigma = np.ones(n_grid) * sigma_prior
    X = np.empty((n_iter, n_params))
    X_scaled = np.matrix(np.empty((n_iter, n_params)))
    y = np.empty(n_iter)
    logger.info("%4s |%9s |%9s |%s", "Iter", "Func", "Max",
                '|'.join(['{:6s}'.format(i) for i in params_name]))
    for i in range(n_iter):
        # beta = 2*np.log((i+1)**2*2*np.pi**2/3/.1) + \
        #     2*n_params*np.log((i+1)**2*n_params)
        beta = (i + 1)**2
        # ipdb.set_trace()
        idx = np.argmax(mu + np.sqrt(beta) * sigma)
        X[i, :] = grid[idx]
        X_scaled[i] = grid_scaled[idx]
        y[i] = func(**dict(zip(params_name, X[i])))
        invKT = inv(kernel(X_scaled[:i + 1], X_scaled[:i + 1]) * sigma_prior**2 +
                    sig**2 * identity(i + 1))
        grid, grid_scaled = GetRandGrid(n_grid, params_dist)
        kT = kernel(X_scaled[:i + 1], grid_scaled) * sigma_prior**2
        mu = mu_prior + kT.T.dot(invKT).dot(y[:i + 1] - mu_prior)
        sigma2 = np.ones(n_grid) * sigma_prior**2 - diag(kT.T.dot(invKT).dot(kT))
        sigma = np.sqrt(sigma2)
        logger.info("%4d |%9.4g |%9.4g |%s", i, y[i], np.max(y[:i + 1]),
                    '|'.join(['{:6.2g}'.format(ii) for ii in X[i]]))
        if time.time() - time_start > time_budget:
            break
    ipdb.set_trace()
    if True:
        figure(1)
        plt.clf()
        xlim((0, 5))
        ylim(-4, 10)
        index = np.argsort(grid[:, 0])
        gr = grid[:, 0]
        plot(gr[index], mu[index], color='red', label="Mean")
        plot(gr[index], mu[index] + sigma[index], color='blue', label="Mean + Sigma")
        plot(gr[index], mu[index] - sigma[index], color='blue', label="Mean - Sigma")
        plot(X[:i + 1, 0], y[:i + 1], 'o', color='green', label="Eval Points")
        plot(np.linspace(0, 5, num=500), func(np.linspace(0, 5, num=500)),
             color='green', label="True Func")
        plot(gr[index], mu[index] + sqrt(beta) * sigma[index],
             color='yellow', label="Mean + sqrt(B)*Sigma")
        plt.grid()
        legend(loc=2)
        show()
    return {'X': X, 'y': y, 'mu': mu, 'beta': beta, 'sigma': sigma, 'grid': grid}
    return pts


if __name__ == "__main__":
    # Read image file names
    fileA = "uttower1.jpg"  # raw_input("please read ur image name here: ")
    fileB = "uttower2.jpg"  # raw_input("please read ur image name here: ")
    imageA = mpimg.imread(fileA)
    imageB = mpimg.imread(fileB)
    pts = getCorrespondence(imageA, imageB, 8)
    mos = mosiac.mos()
    # H = mos.getH_2(pts)  # (pts[0], pts[1])
    # print H
    H = mos.getH(pts[0], pts[1])  # (pts, 8)  # (pts[0], pts[1])
    invH = inv(H)
    newImg = mos.applyMosiac(imageB, imageA, invH, H)
    # fig = plt.figure()
    plt.show()
    for pt in pts[0]:
        p = np.dot(H, [pt[0], pt[1], 1])
        p[0] = int(float(p[0]) / float(p[2]))
        p[1] = int(float(p[1]) / float(p[2]))
        p[2] = 1
        plt.plot(p[0] - mos.minX, p[1] - mos.minY, 'xr')
    plt.imshow(newImg)
    plt.axis('image')
def M(t, nu):
    '''Two-level method's symbol with nu pre-relaxations per cycle.'''
    return (np.eye(sum(1 for _ in harmonics(t)))
            - P(t) * inv(Ac(t)) * R(t) * A(t)) * matrix_power(S(t), nu)
if 0:
    # check gradients of the warping function
    from pygp.optimize.optimize_base import checkgrad, OPT

    # derivative w.r.t. y
    def f1(x):
        return warping_function.f(x, hyperparams['warping'])

    def df1(x):
        return warping_function.fgrad_y(x, hyperparams['warping'])

    # derivative w.r.t. psi
    def f2(x):
        return warping_function.fgrad_y(gp.y[10:11], x)

    def df2(x):
        return warping_function.fgrad_y_psi(gp.y[10:11], x)

    C = linalg.inv(gp.get_covariances(hyperparams)['K'])
    Cs = C.copy()

    def f3(x):
        return SP.double(warping_function.pLML(x, C, gp.y))

    def df3(x):
        return warping_function.pLMLgrad(x, C, gp.y)

    def f4(x):
        hyperparams['warping'][:] = x
        return gp.LML(hyperparams)

    def df4(x):
        hyperparams['warping'][:] = x
        return gp.LMLgrad(hyperparams)['warping']

    x = hyperparams['warping'].copy()
    checkgrad(f4, df4, x)
parser.add_argument('--undistorted-multiplication', dest='undistortedImageMultiplication', help='undistorted image multiplication', type=float)
parser.add_argument('-u', dest='undistort', help='undistort the video (because features have been extracted that way)', action='store_true')
parser.add_argument('-f', dest='firstFrameNum', help='number of the first frame to display', type=int)
parser.add_argument('-r', dest='rescale', help='rescaling factor for the displayed image', default=1., type=float)
parser.add_argument('-s', dest='nFramesStep', help='number of frames between each display', default=1, type=int)
parser.add_argument('--save-images', dest='saveAllImages', help='save all images', action='store_true')
parser.add_argument('--last-frame', dest='lastFrameNum', help='number of the last frame to save (for image saving, no display is made)', type=int)
args = parser.parse_args()

if args.configFilename:  # consider there is a configuration file
    params = storage.ProcessParameters(args.configFilename)
    videoFilename = params.videoFilename
    databaseFilename = params.databaseFilename
    if params.homography is not None:
        homography = inv(params.homography)
    else:
        homography = None
    intrinsicCameraMatrix = params.intrinsicCameraMatrix
    distortionCoefficients = params.distortionCoefficients
    undistortedImageMultiplication = params.undistortedImageMultiplication
    undistort = params.undistort
    firstFrameNum = params.firstFrameNum
else:
    homography = None
    undistort = False
    intrinsicCameraMatrix = None
    distortionCoefficients = []
    undistortedImageMultiplication = None
    firstFrameNum = 0
import cv2
import storage
from numpy.linalg.linalg import inv
from numpy import loadtxt
import cvutils

databaseFilename = "laurier.sqlite"
objectNumber = [0]
trajectoryType = "feature"
objects = storage.loadTrajectoriesFromSqlite(databaseFilename, trajectoryType)
obj = objects[0]
print(obj.existsAtInstant(5))

homographyFilename = "laurier-homography.txt"
homography = inv(loadtxt(homographyFilename))
fNo = 10
p = [150, 80]


def projectArray(homography, points):
    from numpy.core import dot
    from numpy.lib.function_base import append
    if points.shape[0] != 2:
        raise Exception('points of dimension {0} {1}'.format(points.shape[0], points.shape[1]))
    if (homography is not None) and homography.size > 0:
        augmentedPoints = append(points, [[1] * points.shape[1]], 0)
        prod = dot(homography, augmentedPoints)
        return prod[0:2] / prod[2]
def road_user_traj(fig, filename, fps, homographyFile, roadImageFile):
    """ Plots all road-user trajectories. """
    homography = inv(loadtxt(homographyFile))
    # print(homography)
    connection = sqlite3.connect(filename)
    cursor = connection.cursor()
    queryStatement = 'SELECT * FROM objects ORDER BY object_id'
    cursor.execute(queryStatement)
    usertypes = []
    for row in cursor:
        usertypes.append(row[1])
    queryStatement = 'SELECT * FROM object_trajectories ORDER BY object_id, frame'
    cursor.execute(queryStatement)
    obj_id = 0
    obj_traj_x = []
    obj_traj_y = []
    # aplot = QTPLT()
    ax = fig.add_subplot(111)
    im = imread(roadImageFile)
    implot = ax.imshow(im)
    # colors = [(0,0,0), (0,0,1), (0,1,0), (1,0,1), (0,1,1), (1,1,0), (1,0,1)]
    userlist = ['unknown', 'car', 'pedestrian', 'motorcycle', 'bicycle', 'bus', 'truck']
    colors = {'unknown': (0, 0, 0), 'car': (0, 0, 1), 'pedestrian': (0, 1, 0),
              'motorcycle': (1, 0, 0), 'bicycle': (0, 1, 1), 'bus': (1, 1, 0),
              'truck': (1, 0, 1)}
    for row in cursor:
        pos = Point(row[2], row[3])
        # xpos = row[2]
        # ypos = row[3]
        # usertype = usertypes[obj_id]
        pos = pos.project(homography)
        obj_traj_x.append(pos.x)
        obj_traj_y.append(pos.y)
        # print(obj_traj)
        if (row[0] != obj_id):
            # color = random.choice(colors)
            usertype = userlist[usertypes[obj_id]]
            ax.plot(obj_traj_x[:-1], obj_traj_y[:-1], ".-", color=colors[usertype],
                    label=usertype, linewidth=2, markersize=3)
            # switching object to row[0]
            obj_id = row[0]
            obj_traj_x = []
            obj_traj_y = []
    # Now add the legend with some customizations.
    # plot_handles = []
    # for user in userlist:
    #     handle = mpatches.Patch(color=colors[user], label=user)
    #     plt.legend(handles=handle, loc='upper right', shadow=True)
    colorlist = []
    recs = []
    for i in range(0, len(userlist)):
        colorlist.append(colors[userlist[i]])
        recs.append(mpatches.Rectangle((0, 0), 1, 1, fc=colorlist[i]))
    ax.set_position([0.1, 0.1, 0.85, 0.85])
    # ax.legend(recs, userlist, loc='center left', bbox_to_anchor=(1, 0.5))
    ax.legend(recs, userlist, loc=8, mode="expand", bbox_to_anchor=(-.5, -.5, .1, .1))
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
    # Put a legend below the current axis
    ax.legend(recs, userlist, loc='upper center', bbox_to_anchor=(0.5, -0.05),
              fancybox=True, shadow=True, ncol=4)
    # ax.legend(recs, userlist, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
    # legend = plt.legend(loc='upper right', shadow=True)
    ax.set_xlim([0, 1280])
    ax.set_ylim([0, 720])
    ax.set_ylim(ax.get_ylim()[::-1])
    # ax.set_xlabel('X-Coordinate')
    # ax.set_ylabel('Y-Coordinate')
    ax.set_title('Road User Trajectories')
    # plt.show()
    connection.commit()
    connection.close()
def setParams(self, mu, cov):
    self.mu = mu
    self.cov = cov
    self._invCov = inv(self.cov)
    self._detCov = det(self.cov)
    self._multConst = 1 / sqrt((2 * pi) ** 3 * self._detCov)
def applyDICOV(msa=None, di=None, dca=None, psicov=None, **kwargs):
    """This function uses a naïve Bayes classifier to combine DCA and PSICOV as
    described in [MW15]. The calculation is based on DI and PSICOV. You could
    provide these two matrices or the MSA.
    The PSICOV matrix should be the PPV scaled. You could use `buildPSICOV_expert`
    with the `use_raw_not_ppv`=0 option, or use `applyPPV` to build it from the
    pre-PPV PSICOV matrix.

    [MW15] Mao W, Kaya C, Dutta A, et al. Comparative study of the effectiveness
    and limitations of current methods for detecting sequence coevolution[J].
    Bioinformatics, 2015: btv103."""
    from numpy import zeros_like, log, mgrid, fromfile, array, cov, diag, e, pi, trunc
    from numpy.linalg.linalg import inv
    from ..IO.output import printError
    from ..Constant import getconstantfunc

    if ((di is None and dca is None) or (psicov is None)) and msa is None:
        printError("DI and PSICOV matrices or MSA should be provided.")
        return None
    if di is not None and dca is not None and not (di == dca).all():
        printError("DI and DCA matrices are not the same, check it or just use one.")
        return None
    d = di if di is not None else dca if dca is not None else None
    p = psicov if psicov is not None else None
    if d is not None:
        if d.ndim != 2:
            printError("The dimension of DI matrix is wrong.")
            return None
        elif d.shape[0] != d.shape[1]:
            printError("DI matrix is not square.")
            return None
        elif p is None and getMSA(msa).shape[1] != d.shape[0]:
            printError("DI matrix does not fit the MSA.")
            return None
    if p is not None:
        if p.ndim != 2:
            printError("The dimension of PSICOV matrix is wrong.")
            return None
        elif p.shape[0] != p.shape[1]:
            printError("PSICOV matrix is not square.")
            return None
        elif d is None and getMSA(msa).shape[1] != p.shape[0]:
            printError("PSICOV matrix does not fit the MSA.")
            return None
    if p is not None and d is not None:
        if p.shape[0] != d.shape[0]:
            printError("DI and PSICOV matrices have different sizes.")
            return None
    d = buildDI(msa) if d is None else d
    p = buildPSICOV_expert(msa, use_raw_not_ppv=0) if p is None else p
    n = d.shape[0]
    dicov = zeros_like(d)
    pplus = getconstantfunc('pplus')
    pminus = getconstantfunc('pminus')
    X, Y = mgrid[-5:0.05:0.05, -3:0.05:0.05]
    pplus.resize(X.shape)
    pminus.resize(X.shape)
    psigplus = getconstantfunc('psigplus')
    psigminus = getconstantfunc('psigminus')
    XX = mgrid[-5:0.05:0.05]
    psigplus.resize(XX.shape)
    psigminus.resize(XX.shape)
    prate = calcContactFrac(d.shape[0])
    qrate = 1.0 - prate
    pplus = pplus * prate
    psigplus = psigplus * prate
    pminus = pminus * qrate
    psigminus = psigminus * qrate
    cdouble = zeros_like(pplus)
    pos = []
    val = []
    for i in range(pplus.shape[0]):
        for j in range(pplus.shape[1]):
            if pplus[i][j] <= 1e-3 or pminus[i][j] <= 1e-3:
                cdouble[i][j] = -1
            else:
                cdouble[i][j] = pplus[i][j] / (pplus[i][j] + pminus[i][j])
                pos.append([X[i][j], Y[i][j]])
                val.append(cdouble[i][j])
    pos = array(pos).T
    val = array(val)
    nn = val.shape[0]
    s = cov(pos)
    invs = inv(s)
    h = (nn ** (-1.0 / 6.0)) * (((2.0 ** (-1.0)) * (diag(s).sum())) ** .5)
    para1 = (-.5) * (h ** -2.)
    for i in range(pplus.shape[0]):
        for j in range(pplus.shape[1]):
            if cdouble[i][j] == -1:
                temp = array([X[i][j], Y[i][j]]).reshape((2, 1))
                temp = e ** (para1 * ((invs.dot((pos - temp)) * (pos - temp)).sum(0)))
                cdouble[i][j] = (temp.dot(val).sum()) / temp.sum()
    csingle = zeros_like(psigplus)
    pos = []
    val = []
    for i in range(psigplus.shape[0]):
        if psigplus[i] <= 1e-8 or psigminus[i] <= 1e-8:
            csingle[i] = -1
        else:
            csingle[i] = psigplus[i] / (psigplus[i] + psigminus[i])
            pos.append(XX[i])
            val.append(csingle[i])
    pos = array(pos).T
    val = array(val)
    nn = val.shape[0]
    s = pos.std()
    h = ((4.0 / 3) ** (.2)) * s * (nn ** -.2)
    para1 = 1. / (nn * h * ((2 * pi) ** .5))
    for i in range(psigplus.shape[0]):
        if csingle[i] == -1:
            temp = para1 * (e ** (-.5 * (((pos - XX[i]) / h) ** 2)))
            csingle[i] = (temp.dot(val).sum()) / temp.sum()
    for i in range(n):
        for j in range(i + 1, n):
            if p[i][j] == 0:
                ldi = log(d[i][j]) / log(10)
                temp = int(trunc((ldi + 5) / .05))
                dicov[i][j] = dicov[j, i] = csingle[temp] + \
                    (ldi - XX[temp]) / .05 * (csingle[temp + 1] - csingle[temp])
            else:
                ldi = log(d[i][j]) / log(10)
                lps = log(p[i][j]) / log(10)
                temp1 = int(trunc((ldi + 5) / .05))
                temp2 = int(trunc((lps + 3) / .05))
                val = array([[cdouble[temp1, temp2], cdouble[temp1, temp2 + 1]],
                             [cdouble[temp1 + 1, temp2], cdouble[temp1 + 1, temp2 + 1]]])
                dicov[i, j] = dicov[j, i] = array(
                    [X[temp1 + 1][temp2] - ldi, ldi - X[temp1][temp2]]).dot(val).dot(
                    array([Y[temp1][temp2 + 1] - lps, lps - Y[temp1][temp2]])) * 400
    return dicov
def fischer(dataset):
    colors = ['r', 'g', 'b']
    labels = dataset[1]
    label_names = dataset[2]
    # For each class, do one-vs-all
    for i in range(len(label_names)):
        c1 = label_names[i]
        new_labels = []
        # Build new labels for the two classes: 0 and 1
        for label in labels:
            if label == c1:
                new_labels.append(0)
            else:
                new_labels.append(1)
        # Split the data again
        (separated_data, tranposed, data) = common.separate_data((dataset[0], new_labels, [0, 1]))
        # Compute the class means
        means = [separated_data[0].mean(0), separated_data[1].mean(0)]
        # Initialize the scatter matrices
        s1 = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
        s2 = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
        # Accumulate the per-class scatter matrices
        for row in separated_data[0]:
            m1 = array([row - means[0]])
            m2 = array([row - means[0]]).transpose()
            s1 = s1 + dot(m2, m1)
        for row in separated_data[1]:
            m1 = array([row - means[1]])
            m2 = array([row - means[1]]).transpose()
            s2 = s2 + dot(m2, m1)
        # Within-class scatter
        sw = s1 + s2
        inv_sw = inv(sw)
        mean_diff = array([means[0] - means[1]])  # mu1 - mu2, wrapped in a 2-D array so it can be used in matrix products
        # This is the optimal direction v
        direction = dot(inv_sw, mean_diff.T)
        p1 = [[], []]
        for idx, row in enumerate(data):
            p1[[0, 1].index(new_labels[idx])].append(dot(row, direction))
        # Plot the projected data
        print('Red: ' + c1)
        print('Blue: the other classes')
        pyplot.plot(p1[0], zeros(len(p1[0])), 'or')
        pyplot.plot(p1[1], zeros(len(p1[1])), 'ob')
        pyplot.show()
#! /usr/bin/env python

import argparse

import storage
from numpy.linalg.linalg import inv
from numpy import loadtxt

parser = argparse.ArgumentParser(description='The program creates bounding boxes in image space around all features (for display and for comparison to ground truth in the form of bounding boxes).')
parser.add_argument('-d', dest='databaseFilename', help='name of the Sqlite database file', required=True)
parser.add_argument('-o', dest='homography', help='name of the image to world homography')
args = parser.parse_args()

homography = None
if args.homography is not None:
    homography = inv(loadtxt(args.homography))
storage.createBoundingBoxTable(args.databaseFilename, homography)
def getElectrodePositions(session, area):
    # Now comes the hard part: identifying electrode locations in cortical space.
    # We use the same mapping function as the video. This generates a basis;
    # we still need to convert the array into that basis.
    import pickle
    corners = pickle.load(open(CGID_PACKAGE + os.path.sep + 'new_corners.p', 'rb'))
    # note: run compile_array_maps.py to build this
    maps = pickle.load(open(CGID_PACKAGE + os.path.sep + 'maps.p', 'rb'))
    monkey = session[:3]
    arrayChannelMap = maps[monkey][area]
    availableChannels = int32(zeros(96))
    foundchannels = set(ravel(arrayChannelMap))
    if -1 in foundchannels:
        foundchannels.remove(-1)
    availableChannels[array(list(foundchannels)) - 1] = 1

    def getMatrixFromAnatomicalToImage(monkey):
        scale = 3.6  # size of M1 array in mm
        if monkey == 'RUS':
            A, D, C, B = corners[monkey]['M1']
            origin = B
            y_vector = C - origin
            x_vector = A - B
            basis = array([x_vector, y_vector])
        if monkey == 'SPK':
            A, D, C, B = corners[monkey]['M1']
            origin = D
            y_vector = C - origin
            x_vector = A - D
            basis = array([x_vector, y_vector])
        basis /= scale
        return origin, basis

    # This quadrilateral defines the physical locations of four points of
    # the array, starting at the top left, and proceeding counter-clockwise
    # around the array as specified in arrayChannelMap
    quad = array(corners[monkey][area])
    origin, basis = getMatrixFromAnatomicalToImage(monkey)
    anatomical = inv(basis)
    quad = (quad - origin)
    quad = (quad.dot(anatomical))
    # Need to interpolate in the quadrilateral to get electrode positions;
    # consider making this a subroutine
    positions = {}
    nrows, ncols = shape(arrayChannelMap)
    topleft, bottomleft, bottomright, topright = quad
    for chi in find(availableChannels) + 1:
        row = find([any(r == chi) for r in arrayChannelMap])
        col = find([any(c == chi) for c in arrayChannelMap.T])
        if prod(shape(row)) < 1 or prod(shape(col)) < 1:
            print('error: cannot locate channel %d' % chi)
            continue
        # The *2+1 accounts for the fact that the electrodes are in
        # the center of the square patches -- our quadrilateral defines the
        # outer boundary of the array, not the electrodes
        row_fraction = (row * 2 + 1) / float(nrows * 2)
        col_fraction = (col * 2 + 1) / float(ncols * 2)
        row_b = row_fraction * (bottomleft - topleft)
        col_b = col_fraction * (topright - topleft)
        position = row_b + col_b + topleft
        positions[chi] = position
    return quad, positions
def updateEP(self, K, logthetaL=None):
    """update a kernel matrix K using the EP approximation
    [K, t, C0] = updateEP(K, logthetaL)
    logthetaL: likelihood hyperparameters
    t: new means of training targets
    K: new effective kernel matrix
    C0: 0th moments
    """
    assert K.shape[0] == K.shape[1], "Kernel matrix must be square"
    assert K.shape[0] == self.n, "Kernel matrix has wrong dimension"
    # approximate site parameters; 3 moments
    # note g is in natural parameter representation (1,2)
    g = SP.zeros([self.n, 2])
    # a copy for damping
    g2 = SP.zeros([self.n, 2])
    # 0. moment is just captured in z
    z = SP.zeros([self.n])
    # damping factors
    damp = SP.ones([self.n])
    # The approximation is
    # p(f) = N(f|mu, Sigma)
    # where Sigma = (K^{-1} + PI^{-1})^{-1}; PI is created from the diagonal
    # entries in g; PI = diag(Var(g))
    # mu = Sigma*PI^{-1}*Mean(g)
    # where \mu is from the site parameters in g also

    # add some jitter to make it invertible
    K += SP.eye(K.shape[0]) * 1E-6
    # initialize current approx. of full covariance
    Sigma = K.copy()
    # invert kernel matrix, which is used later on
    # TODO: replace by chol
    KI = linalg.inv(K)
    # current approx. mean
    mu = SP.zeros([self.n])
    # conversion natural parameter / moment representation
    n2mode = lambda x: SP.array([x[0] / x[1], 1 / x[1]])
    # set hyperparameters of the likelihood object
    self.likelihood.setLogtheta(logthetaL)

    for nep in range(self.Nep):
        # get order of site function update
        perm = SP.random.permutation(self.n)
        perm = SP.arange(self.n)
        for ni in perm:
            # cavity as natural parameter representation
            cav_np = n2mode([mu[ni], Sigma[ni, ni]]) - g[ni]
            # ensure we don't have negative variances. good idea?
            cav_np[1] = abs(cav_np[1])
            # calculate expectation values (int_, int_y, int_y^2)
            ML = self.likelihood.calcExpectations(self.t[ni], cav_np, x=self.x[ni])
            # 1. and 2. moment can be back-calculated to new site parameters;
            # update the site parameters: in natural parameters this is just
            # dividing out the site function; v. convenient
            gn = n2mode(ML[0:2]) - cav_np
            # delta gn in nat. parameters
            dg = gn - g[ni]
            # difference of second moment (old-new)
            ds2 = gn[1] - g[ni, 1]
            # update with damping factor damp[ni]
            g[ni] = g[ni] + damp[ni] * dg
            if (g[ni, 1] < 0):
                g[ni, 1] = 1E-10
            z[ni] = ML[2]
            if 1:
                # rank one updates
                Sigma2 = Sigma
                Sigma = Sigma - ds2 / (1 + ds2 * Sigma[ni, ni]) * SP.outer(Sigma[:, ni], Sigma[ni, :])
                if 1:
                    # check that Sigma is still pos. definite, otherwise we need to do some damping...
                    try:
                        Csigma = linalg.cholesky(Sigma)
                    # except linalg.linalg.LinAlgError:
                    except LinAlgError:
                        logging.debug('damping')
                        Sigma = Sigma2
                        g[ni] = g2[ni]
                        # increase damping factor
                        damp[ni] *= 0.9
                        pass
                # update mu; mu[i] = Sigma[i,i]*(1/Var(g[i]))*Mean(g[i])
                # as g is in nat. parameters this is always like this
                mu = SP.dot(Sigma, g[:, 0])
            else:
                # slow updates
                Sigma = linalg.inv(KI + SP.diag(g[:, 1]))
                mu = SP.dot(Sigma, g[:, 0])
                pass
        # after every sweep recalculate entire covariance structure
        [Sigma, mu, lml] = self.epComputeParams(K, KI, g)
        # create a copy for damping
        g2 = g.copy()
        pass
    if nep == (self.Nep - 1):
        # LG.warn('maximum number of EP iterations reached')
        pass
    # update site parameters
    self.muEP = g[:, 0] / g[:, 1]
    self.vEP = 1 / g[:, 1]