def getProjectionH(self, t):
    """ Project a given general goal orientation into the subspace reachable by the KUKA youBot arm. """
    # TODO: does not work yet
    # Split the homogeneous T (4x4) matrix into R (3x3) and the translation p (3x1)
    px, py, pz = t[:3, 3]
    r = t[:3, :3]
    # Split R (3x3) into its column vectors
    xt, yt, zt = r[:3, 0], r[:3, 1], r[:3, 2]
    # normal vector to the plane of the manipulator
    pxy = sqrt(px ** 2 + py ** 2)
    #m = dot(1 / pxy, [-py, px, 0])
    m = [0, -1, 0]
    # vector normal to both zt and m
    k = cross(zt, m)
    # new vector zt
    pzt = cross(m, k)
    # cost/sint describe the angle between zt and the manipulator plane
    cost = dot(zt, pzt)
    sint = dot(cross(pzt, zt), k)
    # Rodrigues' formula
    pyt = dot(cost, yt) + dot(sint, cross(yt, k)) + \
        dot(dot((1 - cost), dot(k, yt)), k)
    pxt = cross(pyt, pzt)
    # new rotation matrix
    pr = transpose([pxt, pyt, pzt])
    t[:3, :3] = pr
    return t
def ll_dmdt(magnetic_parameters, t, m):
    alpha = magnetic_parameters.alpha
    h_eff = heff(magnetic_parameters, t, m)
    return (-1/(1 + alpha**2) *
            (sp.cross(m, h_eff) + alpha*sp.cross(m, sp.cross(m, h_eff))))
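# A minimal usage sketch (not part of the original module): integrating a Landau-Lifshitz
# right-hand side like ll_dmdt with scipy.integrate.solve_ivp. The MagParams stub and the
# constant-field heff below are assumptions standing in for whatever the real
# magnetic_parameters object and heff(magnetic_parameters, t, m) provide; plain NumPy is
# used instead of the deprecated scipy cross/array aliases.
import numpy as np
from scipy.integrate import solve_ivp

class MagParams:          # hypothetical stand-in for magnetic_parameters
    alpha = 0.05

def heff_stub(params, t, m):
    # assumed effective field: a uniform field along z
    return np.array([0.0, 0.0, 1.0])

def ll_rhs(t, m, params=MagParams()):
    h = heff_stub(params, t, m)
    a = params.alpha
    return -1.0 / (1 + a**2) * (np.cross(m, h) + a * np.cross(m, np.cross(m, h)))

sol = solve_ivp(ll_rhs, (0.0, 50.0), [1.0, 0.0, 0.01], rtol=1e-8)
# |m| should stay close to 1 throughout the damped precession:
print(np.linalg.norm(sol.y[:, -1]))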
def coe_from_sv(R, V, mu): print(R, V) r = sci.linalg.norm(R) v = sci.linalg.norm(V) vr = sci.dot(R, V) / r #print(r, v, vr) #angular momentum H = sci.cross(R, V) h = sci.linalg.norm(H) #inclination incl = math.acos(H[2] / h) N = sci.cross([0, 0, 1], H) n = sci.linalg.norm(N) if n != 0: RA = math.acos(N[0] / n) if N[1] < 0: RA = 2 * pi - RA else: RA = 0 E = ((v**2 - mu / r) * R - r * vr * V) / mu e = sci.linalg.norm(E) if n != 0: if e > eps: w = math.acos(sci.dot(N, E) / n / e) if E[2] < 0: w = 2 * pi - w else: w = 0 else: w = 0 if e > eps: TA = math.acos(sci.dot(E, R) / e / r) if vr < 0: TA = 2 * pi - TA else: cp = sci.cross(N, R) if cp[2] >= 0: TA = math.acos(sci.dot(N, R) / n / r) else: TA = 2 * pi - math.acos(sci.dot(N, R) / n / r) a = h**2 / mu / (1 - e**2) coe = sci.array([h, e, RA, incl, w, TA, a], dtype="float64") print(coe) return coe
def llg_cartesian_residual(magnetic_parameters, t, m_cart, dmdt_cart):
    # Extract the parameters
    alpha = magnetic_parameters.alpha
    gamma = magnetic_parameters.gamma
    Ms = magnetic_parameters.Ms

    h_eff = heff(magnetic_parameters, t, m_cart)

    residual = ((alpha / Ms) * sp.cross(m_cart, dmdt_cart)
                - gamma * sp.cross(m_cart, h_eff)
                - dmdt_cart)
    return residual
def check2dintersect(self, e):
    # Check if the two edges intersect in 2d. Ignores Z.
    p = self.a[:2]
    q = e.a[:2]
    r = (self.b - self.a)[:2]
    s = (e.b - e.a)[:2]
    if sp.cross(r, s) == 0:
        return False
    t = sp.cross(q - p, s) / sp.cross(r, s)
    u = sp.cross(q - p, r) / sp.cross(r, s)
    if t >= 0 and t <= 1 and u >= 0 and u <= 1:
        return True
    return False
def point2dIntersect(self, e):
    # Return the 2d intersection of the two edges.
    # The intersection is placed at the same z as self.a.
    p = self.a[:2]
    q = e.a[:2]
    r = (self.b - self.a)[:2]
    s = (e.b - e.a)[:2]
    t = sp.cross(q - p, s) / sp.cross(r, s)
    u = sp.cross(q - p, r) / sp.cross(r, s)
    if t >= 0 and t <= 1 and u >= 0 and u <= 1:
        intersect = p + t * r
        return sp.array([intersect[0], intersect[1], self.a[2]])
    return None
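# Standalone sketch of the same parametric intersection test the two edge methods above
# rely on (p + t*r == q + u*s), written with plain NumPy and no edge class; purely
# illustrative, not part of the original source.
import numpy as np

def segment_intersect_2d(p, p2, q, q2):
    p, p2, q, q2 = map(np.asarray, (p, p2, q, q2))
    r, s = p2 - p, q2 - q
    denom = np.cross(r, s)          # scalar for 2-D inputs
    if denom == 0:                  # parallel (or collinear) segments
        return None
    t = np.cross(q - p, s) / denom
    u = np.cross(q - p, r) / denom
    if 0 <= t <= 1 and 0 <= u <= 1:
        return p + t * r
    return None

print(segment_intersect_2d([0, 0], [2, 2], [0, 2], [2, 0]))   # -> [1. 1.]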
def getLocalCoords(self, coords): """ Input: coords = matrix of global coordinates Output: coords = matrix of coordinates in local coordinates """ if self.ndim == 1 and np.size(coords, axis=1) == 2: # Transformation matrix dx_dy = coords[1, :] - coords[0, :] i_bar = dx_dy / norm(dx_dy) j_bar = i_bar.dot([[0, 1], [-1, 0]]) Gamma = np.array([i_bar, j_bar]) # Transform into local coordinates coords = Gamma @ coords.transpose() coords = coords.transpose() # Remove y-coordinates return coords[:, 0] elif self.ndim == 1 and np.size(coords, axis=1) == 3: # Transformation matrix i_bar = coords[1, :] - coords[0, :] # [dx, dy, dz] j_bar = np.array([0, 1, 0]) # Assume j-bar points upwards k_bar = np.cross(i_bar, j_bar) Gamma = np.array(gram_schmidt(i_bar, j_bar, k_bar)) # Transform into local coordinates coords = Gamma @ coords.transpose() coords = coords.transpose() # Remove y and z-coordinates return coords[:, 0] elif self.ndim == 2 and np.size(coords, axis=1) == 3: warn("Unverified result, please verify!!") # Transformation matrix i_bar = coords[2, :] - coords[1, :] j_bar = coords[3, :] - coords[2, :] k_bar = np.cross(i_bar, j_bar) Gamma = np.array(gram_schmidt(i_bar, j_bar, k_bar)) # Transform into local coordinates coords = Gamma @ coords.transpose() coords = coords.transpose() # Remove z-coordinates return coords[:, 0:2] elif self.ndim == 3 and np.size(coords, axis=1) > 3: raise ValueError("Element dimensions exceeded !")
def llg_residual_integrand(t, x, m, dmdt, dmdx, test, dtestdx, happ, dampc):
    # Componentwise dot product of mxdmdx and dtestdx (pretending we
    # have vector test functions)
    mxdmdx = sp.dot(skew(m), dmdx)
    exch = array([sp.dot(r_mxdmdx, dtestdx) for r_mxdmdx in mxdmdx])
    # exch = sp.zeros(3)
    # exch[0] = sp.dot(mxdmdx[0], dtestdx[0])
    # exch[1] = sp.dot(mxdmdx[1], dtestdx[1])
    # exch[2] = sp.dot(mxdmdx[2], dtestdx[2])
    return (sp.dot(dmdt, test)
            + sp.dot(sp.cross(m, happ), test)
            - dampc * sp.dot(sp.cross(m, dmdt), test)
            - exch)
def plot_angular_momentum(angular_momenta):
    """ Plot the angular momentum of the motor against time. """
    angular_momentum_norm = 0.0
    for particle in sim.get_particles_excluding_container():
        r = particle.get_current_position()
        p = particle.get_vector_momentum_received()
        # Pad the vectors by adding zeroes at the end.
        p_cross = li.pad(p, (0, 1), "constant")
        r_cross = li.pad(r, (0, 1), "constant")
        angular_momentum_norm += spl.norm(sp.cross(r_cross, p_cross))
    # This value is useful because it gives context to the angular momentum
    # of the motor.
    print "Total angular momentum norm:", angular_momentum_norm

    angular_momenta = sp.array(angular_momenta)
    z_component = angular_momenta[:, -1]
    times = sp.arange(0, len(z_component), 1) * si.Simulation.time_step

    am_figure = pl.figure("Angular Momentum")
    am_figure.gca().set_xlabel(r"$\rm{Time\ /\ s}$")
    am_figure.gca().set_ylabel(r"$\rm{Angular\ Momentum\ /\ Nms}^{-1}$")
    am_figure.gca().plot(times, z_component)
    am_figure.savefig("angular_momentum_2d.png")
def can2riri(can_file_path, Ncontent=0.1):
    # open file
    f = file(can_file_path, 'r')
    tab_geom = IOtable.table_txt(f)
    f.close()
    for i in range(1, len(tab_geom) - 1):
        tab_geom[i][5:] = map(float, tab_geom[i][5:])

    tab = []
    for i in range(1, len(tab_geom) - 1):
        # face center
        p1, p2, p3 = array([tab_geom[i][5:8]]), array([tab_geom[i][8:11]]), array([tab_geom[i][11:]])
        center = tri_ortho(p1, p2, p3)
        # triangle surface (m2)
        s = triangle_area(p1, p2, p3)
        # elevation
        u = p2 - p1
        v = p3 - p1
        c = scipy.cross(u, v)[0]
        norm = c / norme_v(c)
        # plant id, taken from the last 3 digits
        id = int(tab_geom[i][2][-3:])
        # output
        tab.append([id, center[0][0], center[0][1], center[0][2], s])

    tab = IOtable.t_list(tab)
    entite, x, y, z, surf = tab[0], tab[1], tab[2], tab[3], tab[4]
    n = [Ncontent]*len(entite)  # N fixed from the input
    entite = [0]*len(x)  # force everything into the same entity
    # converts cm2 to m2 and cm to m, and shifts back to the origin [0, 0, 0]
    return array(entite), array(x)/100.+0.53, array(y)/100.+0.17, array(z)/100.+0.03, array(surf)/10000., array(n)
def shell(shellCount,extrusionWidth,layer): #This also needs to be made smarter. It can't handle intersections that change the order of the loop yet. ''' This function takes a layer which has been through straighten and order, and forms the perimeter lines which will actually be extruded from the loops. Stores perimeter lines in layer.shells as a list of shell groups. Each shell group is a list of shells with varying insets. Shells are ordered lists of edges similar to loops. ''' insets = [n*extrusionWidth+extrusionWidth/2.0 for n in range(shellCount)] shellGroups = [] for loop in layer.loops: shells = [] for inset in insets: shell = [] for edge in loop: left = sp.cross(sp.array([0,0,1]),edge.dir[0]) shell.append(Basics.edge(edge.a+left*inset,edge.b+left*inset)) for index in range(len(shell)-1): activeEdge = shell[index] nextEdge = shell[index+1] if activeEdge.check2dintersect(nextEdge): intersect = activeEdge.point2dIntersect(nextEdge) shell[index]=Basics.edge(activeEdge.a,intersect) shell[index+1]=Basics.edge(intersect,nextEdge.b) activeEdge = shell[-1] nextEdge = shell[0] if activeEdge.check2dintersect(nextEdge): intersect = activeEdge.point2dIntersect(nextEdge) shell[-1]=Basics.edge(activeEdge.a,intersect) shell[0]=Basics.edge(intersect,nextEdge.b) shells.append(shell) shellGroups.append(shells) layer.shells = shellGroups
def interp_dir_r(self, theta_phi, r):
    try:
        R = np.sqrt(np.sum(r * r, axis=-1))
        R = R.reshape([1, -1])
        _hat_x = self.pgf_gen.a1.reshape([1, -1])
        _hat_y = self.pgf_gen.a2.reshape([1, -1])
        _hat_z = np.cross(_hat_x, _hat_y)
        _hat_z = _hat_z / np.sqrt(np.sum(_hat_z * _hat_z))
        rx = np.sum(r * _hat_x, axis=-1) / np.sum(_hat_x * _hat_x)
        ry = np.sum(r * _hat_y, axis=-1) / np.sum(_hat_y * _hat_y)
        rz = np.sum(r * _hat_z, axis=-1)
        r_loc = np.vstack([rx, ry, rz])
        r_loc = r_loc.transpose()
        theta_phi_copy = theta_phi.reshape([-1, 2])
        pts_dir_r = np.array(
            [np.hstack([xx, yy]) for xx in theta_phi_copy for yy in r_loc])
        result = self.interp(pts_dir_r)
        return result.reshape([theta_phi.shape[0],
                               r.shape[0]]) / R * np.exp(-1j * self.k * R)
    except ValueError as ve:
        print ve
        raise
    except IndexError as ie:
        print ie
        print pts_dir_r.shape
        print r_loc.shape
        print theta_phi.shape
        raise
    pass
def rot90(vectors, axes):
    """Rotate an array of vectors through 90 degrees around an array of axes.

    Parameters
    ----------
    vectors : array
        An array of row vectors (m x 3).
    axes : array
        An array of axes (m x 3).

    Returns
    -------
    array
        Matrix of row vectors (m x 3).

    Notes
    -----
    Computes the cross product of each row vector with its corresponding axis,
    and then rescales the resulting normal vectors to match the length of the
    original row vectors.

    Examples
    --------
    >>> vectors = array([[2, 1, 3], [2, 6, 8]])
    >>> axes = array([[7, 0, 1], [4, 4, 2]])
    >>> rot90(vectors, axes)
    [[-0.18456235 -3.50668461  1.29193644]
     [ 5.3748385  -7.5247739   4.2998708 ]]
    """
    return normalizerow(cross(axes, vectors)) * normrow(vectors)
def orbitVel(self, r, t, antiCW):  # legacy
    # velocity for a circular orbit around body at vector r
    # Anticlockwise is default, enter -1 for clockwise
    v = sp.sqrt(G * self.mass / mag(self.orbitPos(t), r))  # mag v = (GM/R)^0.5
    rHat = sp.append(unitV(self.orbitPos(t), r), 0)  # make it 3D
    vHat = antiCW * sp.cross(zHat, rHat)[:2]  # return to 2D
    return v * vHat
def perp(vecArra):
    # Return the cross product of the positive z-directed unit vector with each
    # row vector input. The input and output vectors are in the xy-plane, and
    # the z-components aren't included.
    z3 = sp.array([0, 0, 1])
    per = sp.cross(z3, vecArra)
    return per[:, 0:2]
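# Quick illustrative check (not from the original source) that the z-cross trick above
# is a +90 degree rotation in the xy-plane: [1, 0] -> [0, 1] and [0, 1] -> [-1, 0].
# Plain NumPy is used here.
import numpy as np
z3 = np.array([0, 0, 1])
v = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
print(np.cross(z3, v)[:, 0:2])   # [[ 0.  1.], [-1.  0.]]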
def grad(V, F, rtype='array'): """Construct the gradient operator of a trianglular mesh. Parameters ---------- V : array Vertex coordinates of the mesh. F : array Face vertex indices of the mesh. rtype : {'array', 'csc', 'csr', 'coo', 'list'} Format of the result. Returns ------- array-like Depending on rtype return type. Notes ----- The gradient operator is fully determined by the connectivity of the mesh and the coordinate difference vectors associated with the edges """ v = V.shape[0] f = F.shape[0] f0 = F[:, 0] # Index of first vertex of each face f1 = F[:, 1] # Index of second vertex of each face f2 = F[:, 2] # Index of last vertex of each face v01 = V[f1, :] - V[f0, :] # Vector from vertex 0 to 1 for each face v12 = V[f2, :] - V[f1, :] # Vector from vertex 1 to 2 for each face v20 = V[f0, :] - V[f2, :] # Vector from vertex 2 to 0 for each face n = cross(v12, v20) # Normal vector to each face A2 = normrow(n) # Length of normal vector is twice the area of the face A2 = tile(A2, (1, 3)) u = normalizerow(n) # Unit normals for each face v01_ = divide(rot90(v01, u), A2) # Vector perpendicular to v01, normalized by A2 v20_ = divide(rot90(v20, u), A2) # Vector perpendicular to v20, normalized by A2 i = hstack(( # Nonzero rows 0 * f + tile(arange(f), (1, 4)), 1 * f + tile(arange(f), (1, 4)), 2 * f + tile(arange(f), (1, 4)))).flatten() j = tile(hstack((f1, f0, f2, f0)), (1, 3)).flatten() # Nonzero columns data = hstack(( hstack((v20_[:, 0], -v20_[:, 0], v01_[:, 0], -v01_[:, 0])), hstack((v20_[:, 1], -v20_[:, 1], v01_[:, 1], -v01_[:, 1])), hstack((v20_[:, 2], -v20_[:, 2], v01_[:, 2], -v01_[:, 2])), )).flatten() G = coo_matrix((data, (i, j)), shape=(3 * f, v)) if rtype == 'array': return G.toarray() elif rtype == 'csr': return G.tocsr() elif rtype == 'csc': return G.tocsc() elif rtype == 'coo': return G else: return G
def __divide(self):
    """ """
    print("TO-DO: DOCUMENTATION")
    print("TO-DO: CHECK LV AND RV, CONFIRM THOSE ARE THE PARTS PRODUCED BY THE CLIPPING PLANE")
    # if self.anterior is None or self.posterior is None or self.apex is None:
    #     self.__calc_landmarks()
    clip = vtk.vtkClipPolyData()
    plane = vtk.vtkPlane()

    anterior = self.anterior
    posterior = self.posterior
    apex = self.apex

    O = asarray(self.polydata.GetPoint(self.apex))
    A = asarray(self.polydata.GetPoint(self.anterior))
    B = asarray(self.polydata.GetPoint(self.posterior))
    OA = A - O
    OB = B - O

    # Clipping plane 1
    normal = cross(OA, OB) / norm(cross(OA, OB))
    plane.SetOrigin(self.polydata.GetPoint(self.apex))
    plane.SetNormal((normal[0], normal[1], normal[2]))
    clip.SetClipFunction(plane)
    clip.SetInputData(self.polydata)
    clip.Update()
    self.__RV_polydata = clip.GetOutput()

    # Clipping plane 2
    normal = cross(OB, OA) / norm(cross(OB, OA))
    plane.SetOrigin(self.polydata.GetPoint(self.apex))
    plane.SetNormal((normal[0], normal[1], normal[2]))
    clip.SetClipFunction(plane)
    clip.SetInputData(self.polydata)
    clip.Update()
    self.__LV_polydata = clip.GetOutput()
def check_dfdm(m_cart): """Compare dfdm function with finite differenced dfdm.""" # Some parameters magnetic_parameters = utils.MagParameters() t = 0.3 # Use LL to get dmdt: alpha = magnetic_parameters.alpha gamma = magnetic_parameters.gamma Hvec = magnetic_parameters.Hvec(None) Ms = magnetic_parameters.Ms h_eff = Hvec dmdt_cart = (gamma/(1+alpha**2)) * sp.cross(m_cart, h_eff) \ - (alpha*gamma/((1+alpha**2)*Ms)) * sp.cross(m_cart, sp.cross( m_cart, h_eff)) # Calculate with function dfdm_func = llg.llg_cartesian_dfdm( magnetic_parameters, t, m_cart, dmdt_cart) def f(t, m_cart, dmdt_cart): # f is the residual + dm/dt (see notes 27/2/13) return llg.llg_cartesian_residual(magnetic_parameters, t, m_cart, dmdt_cart) + dmdt_cart # FD it dfdm_fd = sp.zeros((3, 3)) r = f(t, m_cart, dmdt_cart) delta = 1e-8 for i, m in enumerate(m_cart): m_temp = sp.array(m_cart).copy() # Must force a copy here m_temp[i] += delta r_temp = f(t, m_temp, dmdt_cart) r_diff = (r_temp - r)/delta for j, r_diff_j in enumerate(r_diff): dfdm_fd[i][j] = r_diff_j print dfdm_fd # Check the max of the difference utils.assert_almost_zero(sp.amax(dfdm_func - dfdm_fd), 1e-6)
def triangles_area(triangles, vertices):
    if scipy.sparse.__name__ in type(vertices).__module__:
        vertices = vertices.toarray()
    e1 = vertices[triangles[:, 1]] - vertices[triangles[:, 0]]
    e2 = vertices[triangles[:, 2]] - vertices[triangles[:, 0]]
    normal = scipy.cross(e1, e2)
    tri_area = 0.5 * length(normal)
    return tri_area
def build(self):
    try:
        x = self.x_sample
        y = self.y_sample
        z = self.z_sample
        theta = self.k_dir_theta
        phi = self.k_dir_phi
        _hat_x = self.pgf_gen.a1
        _hat_y = self.pgf_gen.a2
        _hat_z = np.cross(_hat_x, _hat_y)
        _hat_z = _hat_z / np.sqrt(np.sum(_hat_z * _hat_z))
        r = x.reshape([-1, 1, 1, 1])*_hat_x \
            + y.reshape([1, -1, 1, 1])*_hat_y \
            + z.reshape([1, 1, -1, 1])*_hat_z
        r_shape_orign = r.shape
        r_flat = r.reshape([-1, 3])  # geometric grid -- 3D
        _hat_kx = np.array([1, 0, 0])
        _hat_ky = np.array([0, 1, 0])
        _hat_kz = np.array([0, 0, 1])
        k_dir = np.sin(theta.reshape([-1, 1, 1]))*np.cos(phi.reshape([1, -1, 1]))*_hat_kx \
            + np.sin(theta.reshape([-1, 1, 1]))*np.sin(phi.reshape([1, -1, 1]))*_hat_ky \
            + np.cos(theta.reshape([-1, 1, 1]))*np.ones_like(phi.reshape([1, -1, 1]))*_hat_kz
        k_dir_shape_orign = k_dir.shape
        # print k_dir
        k_dir_flat = k_dir.reshape([-1, 3])  # angular grid -- 2D
        R2_row = np.sum(r_flat * r_flat, axis=-1).reshape([-1])
        r_flat = np.array([r_flat[ii]
                           if R2_row[ii] > 0 else np.array([0, 0, 1.e-5])
                           for ii in xrange(r_flat.shape[0])])  # remove the singularity
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('always')
                R_row = np.sqrt(R2_row)
                self.data = self.pgf_gen.pgf(k_dir_flat, r_flat) * R_row * np.exp(
                    1j * self.k * R_row)
                self.data = self.data.reshape(*(k_dir_shape_orign[:-1] +
                                                r_shape_orign[:-1]))
        except Exception as e:
            print e
            raise
        data_slim_shape = [xx for xx in self.data.shape if xx > 1]
        cord_ = (theta, phi, x, y, z)
        cord_slim = [cord_[i] for i in xrange(len(self.data.shape))
                     if self.data.shape[i] > 1]
        self.my_interpolating_function = RegularGridInterpolator(
            cord_slim, self.data.reshape(data_slim_shape))
    except Exception as e:
        print e
        raise
def CheckSide(vertices, point):
    t1, t2, t3, t4 = vertices
    p = point
    side_1 = t2 - t1
    side_2 = t3 - t1
    normal = sci.cross(side_1, side_2)
    ref_vector = t4 - t1
    ref_sign = sci.dot(normal, ref_vector)
    point_vector = p - t1
    point_sign = sci.dot(normal, point_vector)
    if sci.sign(ref_sign) == sci.sign(point_sign):
        return 1
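# Hedged usage sketch for the same-side test above, re-implemented inline with plain
# NumPy so it runs on its own (the vertices and the centroid point below are made up
# for illustration): for a unit tetrahedron, a point near the centroid lies on the same
# side of face (t1, t2, t3) as the fourth vertex.
import numpy as np
verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
centroid = verts.mean(axis=0)
normal = np.cross(verts[1] - verts[0], verts[2] - verts[0])
same_side = np.sign(np.dot(normal, verts[3] - verts[0])) == np.sign(np.dot(normal, centroid - verts[0]))
print(same_side)   # True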
def __init__(self, points):
    # Points should be a list of 3x1 arrays.
    self.points = sp.array(points)
    # Create edges through the list of points.
    self.edges = []
    for i in range(3):
        # Silly and intentionally pythonic way to do this. Edges are 1-2, 2-3, 3-1.
        self.edges.append(edge(self.points[i], self.points[i - 2]))
    self.normal = unit(sp.array([sp.cross(self.edges[0].dir[0],
                                          self.edges[1].dir[0]), [0, 0, 0]]))
    self.plane = plane(self.points[0], self.normal)
def llg_cartesian_residual(magnetic_parameters, t, m_cart, dmdt_cart):
    # Extract the parameters
    alpha = magnetic_parameters.alpha
    gamma = magnetic_parameters.gamma
    Ms = magnetic_parameters.Ms
    Hk_vec = magnetic_parameters.Hk_vec(m_cart)

    # Nasty hack to allow Hvec functions or vectors
    try:
        Hvec = magnetic_parameters.Hvec(t)
    except TypeError:
        Hvec = magnetic_parameters.Hvec

    h_eff = Hvec  # + Hk

    residual = ((alpha / Ms) * sp.cross(m_cart, dmdt_cart)
                - gamma * sp.cross(m_cart, h_eff)
                - dmdt_cart)
    return residual
def contains(self, p):
    # Checks whether a point is on self.
    # point is inside tri iff ABxAP.unit == BCxBP.unit == CAxCP.unit
    # Defining points just to make it neater.
    p1 = self.points[0]
    p2 = self.points[1]
    p3 = self.points[2]
    # Define the 6 vectors
    AB = sp.array([p1[0] - p2[0], p1[1] - p2[1], p1[2] - p2[2]])
    BC = sp.array([p2[0] - p3[0], p2[1] - p3[1], p2[2] - p3[2]])
    CA = sp.array([p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]])
    AP = sp.array([p1[0] - p[0], p1[1] - p[1], p1[2] - p[2]])
    BP = sp.array([p2[0] - p[0], p2[1] - p[1], p2[2] - p[2]])
    CP = sp.array([p3[0] - p[0], p3[1] - p[1], p3[2] - p[2]])
    # Find cross products
    c1 = unit(sp.cross(AB, AP))
    c2 = unit(sp.cross(BC, BP))
    c3 = unit(sp.cross(CA, CP))
    return np.array_equal(c1, c2) and np.array_equal(c1, c3)
def recompute_alpha_varying_fields_at_midpoint(sph_start, sph_end, t_start, t_end, mag_params): """ Compute effective damping from change in magnetisation and change in applied field. See notes 30/7/13 pg 5. Derivatives are estimated using midpoint method finite differences, all values are computed at the midpoint (m = (m_n + m_n-1)/2, similarly for t). """ # Only for normalised problems! assert(mag_params.Ms == 1) # Get some values dt = t_end - t_start t = (t_end + t_start)/2 m = (sp.array(utils.sph2cart(sph_end)) + sp.array(utils.sph2cart(sph_start)))/2 h_eff = heff(mag_params, t, m) mxh = sp.cross(m, h_eff) # Finite difference derivatives dhadt = (mag_params.Hvec(t_end) - mag_params.Hvec(t_start))/dt dedt = (llg_state_energy(sph_end, mag_params, t_end) - llg_state_energy(sph_start, mag_params, t_start) )/dt dmdt = (sp.array(utils.sph2cart(sph_end)) - sp.array(utils.sph2cart(sph_start)))/dt # utils.assert_almost_equal(dedt, sp.dot(m_cart_end, dhadt) # + sp.dot(dmdt, h_eff_end), 1e-2) # print(sp.dot(m_cart_end, dhadt), dedt) # Calculate alpha itself using the forumla derived in notes alpha = -((dedt + sp.dot(m, dhadt)) / (sp.dot(h_eff, sp.cross(m, dmdt)))) return alpha
def rotation_matrix_from_vectors(a, b):
    a = scipy.asarray(a)
    b = scipy.asarray(b)
    a /= scipy.linalg.norm(a)
    b /= scipy.linalg.norm(b)
    v = scipy.cross(a, b)
    c = scipy.dot(a, b)
    s = scipy.linalg.norm(v)
    I = scipy.identity(3, dtype='f8')
    k = scipy.array([[0., -v[2], v[1]],
                     [v[2], 0., -v[0]],
                     [-v[1], v[0], 0.]])
    if s == 0.:
        return I
    return I + k + scipy.matmul(k, k) * ((1. - c) / (s**2))
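# Sanity-check sketch (not from the original source): the Rodrigues construction above
# should map the direction of a onto the direction of b, i.e. R @ a_hat ~= b_hat. The
# vectors below are arbitrary, and plain NumPy replaces the deprecated scipy aliases.
import numpy as np
a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 1.0])
a_hat, b_hat = a / np.linalg.norm(a), b / np.linalg.norm(b)
v = np.cross(a_hat, b_hat)
c, s = np.dot(a_hat, b_hat), np.linalg.norm(v)
k = np.array([[0., -v[2], v[1]], [v[2], 0., -v[0]], [-v[1], v[0], 0.]])
R = np.identity(3) + k + k @ k * ((1.0 - c) / s**2)
print(np.allclose(R @ a_hat, b_hat))   # True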
def set_orientation(self, value):
    if isinstance(value, (list, N.ndarray)):
        value = N.asarray(value)
        # find quaternion for rotation
        n = N.cross([0, 0, 1], unit_vector(value[:3]))
        phi = N.arccos(N.dot([0, 0, 1], unit_vector(value[:3])))
        self._orientation = quaternion_about_axis(phi, n)
    elif value is True:
        # random orientation
        self._orientation = random_quaternion()
    else:
        # anything else means no orientation
        self._orientation = False
def get_tri_area(pts):
    """
    Given the coordinates of three points, compute the area of the triangle
    they define.

    Args:
        pts: [a, b, c] three points
    """
    a, b, c = pts[0], pts[1], pts[2]
    v1 = np.array(b) - np.array(a)
    v2 = np.array(c) - np.array(a)
    area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
    return area_tri
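# Minimal check of the half-cross-product area formula used above (illustrative only,
# with plain NumPy): a right triangle with legs 3 and 4 has area 6.
import numpy as np
a, b, c = np.array([0., 0., 0.]), np.array([3., 0., 0.]), np.array([0., 4., 0.])
print(np.linalg.norm(np.cross(b - a, c - a)) / 2)   # 6.0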
def repLocFromJac(self, rep_loc=None, vec_xi=None):
    # to do: vectorization
    listZ = [sp.cross(J[0], J[1]) for J in self.JacobianMatrix]
    listX = []
    listY = []
    if rep_loc is None:
        listZ = [Z / linalg.norm(Z) for Z in listZ]  # direction perpendicular to the tangent plane
        x = sp.array([1, 0, 0])
        y = sp.array([0, 1, 0])
        for Z in listZ:
            X = x - Z[0] * Z
            normX = linalg.norm(X)
            if normX != 0:
                listX.append(X / normX)
                listY.append(sp.cross(Z, listX[-1]))
            else:
                listY.append(y - Z[1] * Z)
                listY[-1] /= linalg.norm(listY[-1])
                listX.append(sp.cross(listY[-1], Z))
    else:
        rep_pg = self.interpolateLocalFrame(rep_loc, vec_xi)  # interpolate the local frame at the Gauss points
        for k, Z in enumerate(listZ):
            x = rep_pg[k][0]
            y = rep_pg[k][1]
            X = x - sp.dot(x, Z) * Z
            normX = linalg.norm(X)
            if normX != 0:
                listX.append(X / normX)
                listY.append(sp.cross(Z, listX[-1]))
            else:
                listY.append(y - sp.dot(y, Z) * Z)
                listY[-1] /= linalg.norm(listY[-1])
                listX.append(sp.cross(listY[k], Z))
    return sp.array([[listX[k], listY[k], listZ[k]] for k in range(len(listZ))])
def recompute_alpha_varying_fields( sph_start, sph_end, t_start, t_end, mag_params): """ Compute effective damping from change in magnetisation and change in applied field. See notes 30/7/13 pg 5. Derivatives are estimated using BDF1 finite differences. """ # Only for normalised problems! assert(mag_params.Ms == 1) # Get some values dt = t_end - t_start m_cart_end = utils.sph2cart(sph_end) h_eff_end = heff(mag_params, t_end, m_cart_end) mxh = sp.cross(m_cart_end, h_eff_end) # Finite difference derivatives dhadt = (mag_params.Hvec(t_start) - mag_params.Hvec(t_end))/dt dedt = (llg_state_energy(sph_end, mag_params, t_end) - llg_state_energy(sph_start, mag_params, t_start) )/dt dmdt = (sp.array(utils.sph2cart(sph_start)) - sp.array(m_cart_end))/dt utils.assert_almost_equal(dedt, sp.dot(m_cart_end, dhadt) + sp.dot(dmdt, h_eff_end), 1e-2) # print(sp.dot(m_cart_end, dhadt), dedt) # Calculate alpha itself using the forumla derived in notes alpha = ((dedt - sp.dot(m_cart_end, dhadt)) / (sp.dot(h_eff_end, sp.cross(m_cart_end, dmdt)))) return alpha
def get_reciprocal(positions, dihedral_atoms):
    """ In dihedral angle calculation, see if angle is the reciprocal or not. """
    ii, jj, kk, ll = dihedral_atoms
    # vector 0->1, 1->2, 2->3 and their normalized cross products:
    a = positions[jj] - positions[ii]
    b = positions[kk] - positions[jj]
    c = positions[ll] - positions[kk]
    bxa = sp.cross(b, a)
    if sp.vdot(bxa, c) > 0:
        return True
    else:
        return False
def eval_plane(s, plane, pt):
    from scipy import cross, dot
    pt1 = plane[0]
    pt2 = plane[1]
    pt3 = plane[2]
    V12 = tuple(array(pt2) - array(pt1))
    V13 = tuple(array(pt3) - array(pt1))
    Vn = tuple(cross(V12, V13))
    V01 = tuple(array(pt1) - array(pt))
    return dot(V01, Vn)
def low_accuracy_recompute_alpha_varying_fields( sph_start, sph_end, t_start, t_end, mag_params): """ Compute effective damping from change in magnetisation and change in applied field. From Nonlinear magnetization dynamics in nanosystems eqn (2.15). See notes 30/7/13. Derivatives are estimated using BDF1 finite differences. """ # Only for normalised problems! assert(mag_params.Ms == 1) # Get some values dt = t_end - t_start m_cart_end = utils.sph2cart(sph_end) h_eff_end = heff(mag_params, t_end, m_cart_end) mxh = sp.cross(m_cart_end, h_eff_end) # Finite difference derivatives dhadt = (mag_params.Hvec(t_start) - mag_params.Hvec(t_end))/dt assert(all(dhadt == 0)) # no field for now dedt = (llg_state_energy(sph_end, mag_params, t_end) - llg_state_energy(sph_start, mag_params, t_start) )/dt sigma = sp.dot(mxh, mxh) / (dedt + sp.dot(m_cart_end, dhadt)) possible_alphas = sp.roots([1, sigma, 1]) a = (-sigma + sqrt(sigma**2 - 4))/2 b = (-sigma - sqrt(sigma**2 - 4))/2 possible_alphas2 = [a, b] utils.assert_list_almost_equal(possible_alphas, possible_alphas2) print(sigma, possible_alphas) def real_and_positive(x): return sp.isreal(x) and x > 0 alphas = filter(real_and_positive, possible_alphas) assert(len(alphas) == 1) return sp.real(alphas[0])
def rotmat_from_u2v(u, v):
    """
    Return the rotation matrix associated with rotation of vector u onto vector v.
    Euler-Rodrigues formula.
    """
    u, v = u / LA.norm(u), v / LA.norm(v)
    axis = sp.cross(u, v)
    theta = sp.arcsin(LA.norm(axis))
    axis = axis / LA.norm(axis)  # math.sqrt(np.dot(axis, axis))
    a = sp.cos(theta / 2)
    b, c, d = -axis * sp.sin(theta / 2)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
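# Illustrative check of the Euler-Rodrigues construction above, re-derived inline with
# plain NumPy so it runs on its own. Note that the arcsin-based angle only resolves
# rotations up to 90 degrees, so the example deliberately uses an acute pair of vectors;
# the specific vectors are made up for the demonstration.
import numpy as np
u = np.array([1.0, 0.0, 0.0])
v = np.array([1.0, 1.0, 0.0]) / np.sqrt(2)
axis = np.cross(u, v)
theta = np.arcsin(np.linalg.norm(axis))
axis = axis / np.linalg.norm(axis)
a = np.cos(theta / 2)
b, c, d = -axis * np.sin(theta / 2)
R = np.array([[a*a + b*b - c*c - d*d, 2*(b*c + a*d),         2*(b*d - a*c)],
              [2*(b*c - a*d),         a*a + c*c - b*b - d*d, 2*(c*d + a*b)],
              [2*(b*d + a*c),         2*(c*d - a*b),         a*a + d*d - b*b - c*c]])
print(np.allclose(R @ u, v))   # True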
def straighten(layer):
    # Changes all the edges in layer so that if the vector from a to b is regarded as
    # forward, the inside of mesh is always to the left.
    # layer is a list of edges outputted by mesh.chop. It is NOT a layer.
    newEdges = []
    startLen = len(layer.borders)
    for edge in layer.borders:
        right = sp.cross(edge.dir[0], sp.array([0, 0, 1]))
        # A testpoint which is a miniscule distance to the left of the line.
        testPoint = edge.midpoint() - 0.0001 * right
        if layer.mesh.contains(testPoint):
            newEdges.append(edge)
            #print "Did not flip"
        else:
            #print "Flipped"
            newEdges.append(Basics.edge(edge.b, edge.a))
    if startLen != len(newEdges):
        print "straighten procedure has lost edges."
    layer.borders = newEdges
def calc_vol2(X, T):
    minz = X[:, 2].min()
    X[:, 2] -= minz
    N = X.shape[0] / 6
    sc = [-1, 1, -1, 1, -1, 1]
    V = 0
    for i in range(6):
        i1 = i * N
        i2 = (i + 1) * N
        for t in T[i1:i2]:
            x = X[t, :]
            h = x[:, 2].mean(0)
            v1 = morphic.utils.vector(x[0, :2], x[1, :2])
            v2 = morphic.utils.vector(x[0, :2], x[2, :2])
            a = 0.5 * morphic.utils.length(scipy.cross(v1, v2))
            #print a
            V += sc[i] * a * h
    return V
def iscoplanar(coords): r''' Determines if given pores are coplanar with each other Parameters ---------- coords : array_like List of pore coords to check for coplanarity. At least 3 pores are required. Returns ------- A boolean value of whether given points are coplanar (True) or not (False) ''' coords = _sp.array(coords, ndmin=1) if _sp.shape(coords)[0] < 3: raise Exception('At least 3 input pores are required') Px = coords[:, 0] Py = coords[:, 1] Pz = coords[:, 2] # Do easy check first, for common coordinate if _sp.shape(_sp.unique(Px))[0] == 1: return True if _sp.shape(_sp.unique(Py))[0] == 1: return True if _sp.shape(_sp.unique(Pz))[0] == 1: return True # Perform rigorous check using vector algebra n1 = _sp.array((Px[1] - Px[0], Py[1] - Py[0], Pz[1] - Pz[0])).T n2 = _sp.array((Px[2] - Px[1], Py[2] - Py[1], Pz[2] - Pz[1])).T n = _sp.cross(n1, n2) r = _sp.array((Px[1:-1] - Px[0], Py[1:-1] - Py[0], Pz[1:-1] - Pz[0])) n_dot = _sp.dot(n, r) if _sp.sum(n_dot) == 0: return True else: return False
def voronoi(network, geometry, **kwargs):
    r"""
    Update the throat normals from the voronoi vertices
    """
    verts = geometry["throat.vertices"]
    value = sp.ndarray([len(verts), 3])
    for i in range(len(verts)):
        if len(sp.unique(verts[i][:, 0])) == 1:
            verts_2d = sp.vstack((verts[i][:, 1], verts[i][:, 2])).T
        elif len(sp.unique(verts[i][:, 1])) == 1:
            verts_2d = sp.vstack((verts[i][:, 0], verts[i][:, 2])).T
        else:
            verts_2d = sp.vstack((verts[i][:, 0], verts[i][:, 1])).T
        hull = ConvexHull(verts_2d, qhull_options='QJ Pp')
        sorted_verts = verts[i][hull.vertices]
        v1 = sorted_verts[1] - sorted_verts[0]
        v2 = sorted_verts[-1] - sorted_verts[0]
        value[i] = sp.cross(v1, v2)
    return value
def iscoplanar(coords):
    r"""
    Determines if given pores are coplanar with each other

    Parameters
    ----------
    coords : array_like
        List of pore coords to check for coplanarity.  At least 3 pores are
        required.

    Returns
    -------
    A boolean value of whether given points are coplanar (True) or not (False)
    """
    coords = _sp.array(coords, ndmin=1)
    if _sp.shape(coords)[0] < 3:
        raise Exception("At least 3 input pores are required")

    Px = coords[:, 0]
    Py = coords[:, 1]
    Pz = coords[:, 2]

    # Do easy check first, for common coordinate
    if _sp.shape(_sp.unique(Px))[0] == 1:
        return True
    if _sp.shape(_sp.unique(Py))[0] == 1:
        return True
    if _sp.shape(_sp.unique(Pz))[0] == 1:
        return True

    # Perform rigorous check using vector algebra
    n = _sp.array((Px - Px[0], Py - Py[0], Pz - Pz[0])).T
    n0 = _sp.array((Px[-1] - Px[0], Py[-1] - Py[0], Pz[-1] - Pz[0])).T
    n_cross = _sp.cross(n0, n)
    n_dot = _sp.multiply(n0, n_cross)
    if _sp.sum(_sp.absolute(n_dot)) == 0:
        return True
    else:
        return False
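# Illustrative standalone sketch of the cross/dot coplanarity idea the two iscoplanar
# variants above rely on, in plain NumPy: four points in the z = 1 plane are coplanar,
# and perturbing one of them breaks it. The test points are made up for the example.
import numpy as np
pts = np.array([[0., 0., 1.], [1., 0., 1.], [0., 1., 1.], [1., 1., 1.]])
n = np.cross(pts[1] - pts[0], pts[2] - pts[0])
print(np.allclose(np.dot(pts[1:] - pts[0], n), 0))   # True
pts[3, 2] = 2.0
print(np.allclose(np.dot(pts[1:] - pts[0], n), 0))   # False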
def loadbinfodcm(filename, spm_converted=1):
    '''
    Load B-value and B-vector information from the Dicom header of a file.

    This assumes that the scanner is Siemens. The needed information is under the
    CSA Image Information Header in the Dicom header. At the moment only version
    SV10 is supported.

    This was inspired by the work of Williams & Brett using the following matlab script
    http://imaging.mrc-cbu.cam.ac.uk/svn/utilities/devel/cbu_dti_params.m
    However here we are using pydicom and then read directly from the CSA header.

    Input: Dicom filename

    Output: B_value (stored in the dicom), B_vec (B-vector calculated from the stored
    B-matrix), G_direction (gradient direction stored in the dicom), B_value_B_matrix
    (B-value calculated from the B-matrix stored in the dcm after using eigenvalue
    decomposition).

    Example:
    B_value, B_vec, G_direction, B_value_B_matrix = loadbinfodcm(fname)
    '''
    #filename = '/backup/Data/Eleftherios/CBU090133_METHODS/20090227_145404/Series_003_CBU_DTI_64D_iso_1000/1.3.12.2.1107.5.2.32.35119.2009022715012276195181703.dcm'
    #filename = '/backup/Data/Eleftherios/CBU090133_METHODS/20090227_145404/Series_003_CBU_DTI_64D_iso_1000/1.3.12.2.1107.5.2.32.35119.2009022715073976305795724.dcm'
    if os.path.isfile(filename) == False:
        print('Filename does not exist')
        return

    data = dicom.read_file(filename)

    if spm_converted:
        y_flipper = sp.diag([1, -1, 1])
    else:
        y_flipper = sp.eye(3)
    #print 'y_flipper', y_flipper

    orient = data.ImageOrientationPatient
    orient = sp.transpose(sp.reshape(orient, (2, 3)))
    v1 = sp.array([orient[0, 0], orient[1, 0], orient[2, 0]])
    v2 = sp.array([orient[0, 1], orient[1, 1], orient[2, 1]])
    v3 = sp.cross(v1, v2)
    #print 'v3', v3
    orient = sp.column_stack((v1.transpose(), v2.transpose(), v3.transpose()))

    if lg.det(orient) < 0:
        #print('Det orient < 0')
        print 'Negative determinant.'
        orient[:, 2] = -orient[:, 2]
    #print 'orient', orient

    vox_to_dicom = sp.dot(orient, y_flipper)
    #print 'vox_to_dicom', vox_to_dicom
    mat = lg.inv(vox_to_dicom)
    #print 'mat', mat
    #print vox_to_dicom*vox_to_dicom

    csainfo = data[0x029, 0x1010]
    #print csainfo[0:4]
    if csainfo[0:4] != 'SV10':
        print 'No SV10'
        B_vec = sp.array([sp.NaN, sp.NaN, sp.NaN])
        B_value = 0
        G_direction = sp.array([0.0, 0.0, 0.0])
        B_value_B_matrix = 0
        return B_value, B_vec, G_direction, B_value_B_matrix

    start, stop = 8, 12
    #print 'CSA Image Info'
    n = struct.unpack('I', csainfo[start:stop])
    n = n[0]
    #print 'n:', n

    B_value = -1
    B_matrix = []
    G_direction = []

    # Read B-related info
    start = 16
    for i in xrange(n):
        rec = []
        stop = start + 64 + 4 + 4 + 4 + 4 + 4
        name = struct.unpack('64ci4ciii', csainfo[start:stop])
        nitems = int(name[-2])
        startstore = start
        start = stop
        #print(''.join(name[0:64]))
        #print(''.join(name[0:25]))
        matstart = 0
        valstart = 0
        diffgradstart = 0
        if ''.join(name[0:8]) == 'B_matrix':
            matstart = startstore
        if ''.join(name[0:7]) == 'B_value':
            valstart = startstore
        if ''.join(name[0:26]) == 'DiffusionGradientDirection':
            diffgradstart = startstore
        for j in xrange(nitems):
            xx = struct.unpack('4i', csainfo[start:start + 4 * 4])
            length = int(xx[1])
            value = struct.unpack(str(length) + 'c',
                                  csainfo[start + 4 * 4:start + 4 * 4 + length])
            if matstart > 0:
                if len(value) > 0:
                    B_matrix.append(float(''.join(value[:-1])))
                else:
                    B_matrix.append(0.0)
            if valstart > 0:
                if len(value) > 0:
                    B_value = float(''.join(value[:-1]))
            if diffgradstart > 0:
                if len(value) > 0:
                    G_direction.append(float(''.join(value[:-1])))
            stop = start + 4 * 4 + length + (4 - length % 4) % 4
            start = stop

    if B_value > 0:
        B_mat = sp.array([[B_matrix[0], B_matrix[1], B_matrix[2]],
                          [B_matrix[1], B_matrix[3], B_matrix[4]],
                          [B_matrix[2], B_matrix[4], B_matrix[5]]])
        [vals, vecs] = lg.eigh(B_mat)
        dbvec = vecs[:, 2]
        if dbvec[0] < 0:
            dbvec = dbvec * -1
        B_vec = sp.transpose(sp.dot(mat, dbvec))
        B_value_B_matrix = vals.max()
    else:
        B_vec = sp.array([0.0, 0.0, 0.0])
        B_value = 0
        G_direction = sp.array([0.0, 0.0, 0.0])
        B_value_B_matrix = 0

    return B_value, B_vec, G_direction, B_value_B_matrix
def _get_hull_volume(points): r""" Calculate the volume of a set of points by dividing the bounding surface into triangles and working out the volume of all the pyramid elements connected to the volume centroid """ " remove any duplicate points - this messes up the triangulation " points = _sp.asarray(misc.unique_list(np.around(points,10))) try: tri = Delaunay(points,qhull_options='QJ Pp') except _sp.spatial.qhull.QhullError: print(points) " We only want points included in the convex hull to calculate the centroid " hull_centroid = _sp.array([points[:,0].mean(),points[:,1].mean(),points[:,2].mean()]) hull_volume = 0.0 pyramid_COMs = [] for ia, ib, ic in tri.convex_hull: " Points making each triangular face " " Collection of co-ordinates of each point in this face " face_x = points[[ia,ib,ic]][:,0] face_y = points[[ia,ib,ic]][:,1] face_z = points[[ia,ib,ic]][:,2] " Average of each co-ordinate is the centroid of the face " face_centroid = [face_x.mean(),face_y.mean(),face_z.mean()] face_centroid_vector = face_centroid - hull_centroid " Vectors of the sides of the face used to find normal vector and area " vab = points[ib] - points[ia] vac = points[ic] - points[ia] vbc = points[ic] - points[ib] # used later for area #face_COM = (vab+vac)/3 " As vectors are co-planar the cross-product will produce the normal vector of the face " face_normal = _sp.cross(vab,vac) try: face_unit_normal = face_normal/_sp.linalg.norm(face_normal) except RuntimeWarning: print("Pore Volume Error:" +str(vab)+" "+str(vac)) " As triangles are orientated randomly in 3D we could either transform co-ordinates to align with a plane and perform 2D operations " " to work out the area or we could work out the lengths of each side and use Heron's formula which is easier" " Using Delaunay traingulation will always produce triangular faces but if dealing with other polygons co-ordinate transfer may be necessary " a = _sp.linalg.norm(vab) b = _sp.linalg.norm(vbc) c = _sp.linalg.norm(vac) " Semiperimeter " s = 0.5*(a+b+c) face_area = _sp.sqrt(s*(s-a)*(s-b)*(s-c)) " Now the volume of the pyramid section defined by the 3 face points and the hull centroid can be calculated " pyramid_volume = _sp.absolute(_sp.dot(face_centroid_vector,face_unit_normal)*face_area/3) " Each pyramid is summed together to calculate the total volume " hull_volume += pyramid_volume " The Centre of Mass will not be the same as the geometrical centroid " " A weighted adjustment can be calculated from the pyramid centroid and volume " vha = points[ia]-hull_centroid vhb = points[ib]-hull_centroid vhc = points[ic]-hull_centroid pCOM = ((vha+vhb+vhc)/4)*pyramid_volume pyramid_COMs.append(pCOM) if _sp.isnan(hull_volume): hull_volume = 0.0 if hull_volume>0: hull_COM = hull_centroid + _sp.mean(_sp.asarray(pyramid_COMs),axis=0)/hull_volume else: hull_COM = hull_centroid return hull_volume, hull_COM
def pca(self, points):
    cov_mat = points[0:3, :] * points[0:3, :].T
    (values, vectors) = scipy.linalg.eig(cov_mat)
    vectors[0:3, 2] = scipy.cross(vectors[0:3, 0], vectors[0:3, 1])
    return vectors
def find_object_frame_and_bounding_box(self, point_cloud): #get the name of the frame to use with z-axis being "up" or "normal to surface" #(the cluster will be transformed to this frame, and the resulting box z will be this frame's z) #if param is not set, assumes the point cloud's frame is such self.base_frame = rospy.get_param("~z_up_frame", point_cloud.header.frame_id) #convert from PointCloud to 4xn scipy matrix in the base_frame cluster_frame = point_cloud.header.frame_id (points, cluster_to_base_frame) = transform_point_cloud(self.tf_listener, point_cloud, self.base_frame) if points == None: return (None, None, None) #print "cluster_to_base_frame:\n", ppmat(cluster_to_base_frame) #find the lowest point in the cluster to use as the 'table height' table_height = points[2,:].min() #run PCA on the x-y dimensions to find the tabletop orientation of the cluster (shifted_points, xy_mean) = self.mean_shift_xy(points) directions = self.pca(shifted_points[0:2, :]) #convert the points to object frame: #rotate all the points about z so that the shortest direction is parallel to the y-axis (long side of object is parallel to x-axis) #and translate them so that the table height is z=0 (x and y are already centered around the object mean) y_axis = scipy.mat([directions[1][0], directions[1][1], 0.]) z_axis = scipy.mat([0.,0.,1.]) x_axis = scipy.cross(y_axis, z_axis) rotmat = scipy.matrix(scipy.identity(4)) rotmat[0:3, 0] = x_axis.T rotmat[0:3, 1] = y_axis.T rotmat[0:3, 2] = z_axis.T rotmat[2, 3] = table_height object_points = rotmat**-1 * shifted_points #remove outliers from the cluster object_points = self.remove_outliers(object_points) #find the object bounding box in the new object frame as [[xmin, ymin, zmin], [xmax, ymax, zmax]] (coordinates of opposite corners) object_bounding_box = [[0]*3 for i in range(2)] object_bounding_box_dims = [0]*3 for dim in range(3): object_bounding_box[0][dim] = object_points[dim,:].min() object_bounding_box[1][dim] = object_points[dim,:].max() object_bounding_box_dims[dim] = object_bounding_box[1][dim] - object_bounding_box[0][dim] #now shift the object frame and bounding box so that the z-axis is centered at the middle of the bounding box x_offset = object_bounding_box[1][0] - object_bounding_box_dims[0]/2. y_offset = object_bounding_box[1][1] - object_bounding_box_dims[1]/2. for i in range(2): object_bounding_box[i][0] -= x_offset object_bounding_box[i][1] -= y_offset object_points[0, :] -= x_offset object_points[1, :] -= y_offset offset_mat = scipy.mat(scipy.identity(4)) offset_mat[0,3] = x_offset offset_mat[1,3] = y_offset rotmat = rotmat * offset_mat #pdb.set_trace() #record the transforms from object frame to base frame and to the original cluster frame, #broadcast the object frame to tf, and draw the object frame in rviz unshift_mean = scipy.identity(4) unshift_mean[0,3] = xy_mean[0] unshift_mean[1,3] = xy_mean[1] object_to_base_frame = unshift_mean*rotmat object_to_cluster_frame = cluster_to_base_frame**-1 * object_to_base_frame #broadcast the object frame to tf (object_frame_pos, object_frame_quat) = mat_to_pos_and_quat(object_to_cluster_frame) self.tf_broadcaster.sendTransform(object_frame_pos, object_frame_quat, rospy.Time.now(), "object_frame", cluster_frame) return (object_points, object_bounding_box_dims, object_bounding_box, object_to_base_frame, object_to_cluster_frame)
def check_good_tilt_data(a1, a2, a3):
    ##DESCRIPTION
    ##checks 1) individual accelerometer value, and 2) the physical, mutual orthogonality
    ##of the accelerometer axes based on their respective values (a1,a2,a3).

    ##INPUTS
    ##a1,a2,a3; integers; accelerometer data (ideally, -1023 to 1023)

    ##OUTPUTS
    ##filter; integer; (1) if axes are mutually orthogonal, (0) if otherwise,
    ##or at least one accelerometer value has exceeded its allowable range

    ##defining maximum dot product value if two axes are perpendicular to each other
    threshold_dot_prod = 0.05  #ATTN SENSLOPE: Please validate this value. also it might be good to move this to the config file

    ##internal printing options
    print_output_text = 0

    ##setting initial value of filter
    filter = 1

    ##ATTN SENSLOPE: This is the current filter for individual axis value. Add, edit, remove as needed.
    ##START OF CHECKING OF INDIVIDUAL AXIS VALUE
    temp2 = (a1, a2, a3)
    temp1 = array('i')
    for ax in temp2:
        if ax < -1023:
            ax = ax + 4096
            if ax > 1223:
                filter = 0
                break
            elif ax > 1023:
                ax = 1023
                temp1.append(ax)
            else:
                temp1.append(ax)
        elif ax < 1024:
            temp1.append(ax)
            continue
        else:
            filter = 0
            break
    ##END OF CHECKING OF INDIVIDUAL AXIS VALUE

    if filter == 0:
        return filter

    ##START OF MUTUAL ORTHOGONALITY CHECK
    ##arranges accel data into increasing values (due to precision issues)##
    temp_sort = np.sort(temp1)
    xa = temp_sort[0]
    ya = temp_sort[1]
    za = temp_sort[2]

    ##Assume unit sphere defined by mutually perpendicular axes i,j,k
    ##Define accelerometer axis inclinations from horizontal plane (i-j) and corresponding cones in unit sphere##
    alpha = (asin(xa/1023.0))   ##inclination from horizontal plane##
    xa_conew = 1*cos(alpha)     ##cone width, measured along i^j^ space##
    xa_coneh = sin(alpha)       ##cone height, measured along k^##
    xa_k = xa_coneh
    xa_cone = sp.array([deg(alpha), xa_coneh, xa_conew])

    beta = (asin(ya/1023.0))
    ya_conew = 1*cos(beta)
    ya_coneh = sin(beta)
    ya_k = ya_coneh
    ya_cone = sp.array([deg(beta), ya_coneh, ya_conew])

    gamma = (asin(za/1023.0))
    za_conew = 1*cos(gamma)
    za_coneh = sin(gamma)
    za_k = za_coneh
    za_cone = sp.array([deg(gamma), za_coneh, za_conew])

    ##arbitrarily sets x-accel axis (minimum value) along plane i^k^##
    xa_i = xa_conew
    xa_j = 0
    xa_k = xa_coneh
    xa_ax = sp.array([xa_i, xa_j, xa_k])  ##defines position of xa_ax##

    ##determines position of y-accel axis (intermediate value) from xa_ax and ya_coneh##
    ya_k = ya_coneh
    ##defines system of two equations##
    fya = lambda y: [(pow(y[0], 2) + pow(y[1], 2) + pow(ya_k, 2) - 1),   ##equation of cone rim##
                     (xa_ax[0]*y[0] + xa_ax[1]*y[1] + xa_ax[2]*ya_k)]    ##equation of dot product of xa and ya = 0##
    ##solves for y[0] and y[1]##
    y0 = scipy.optimize.fsolve(fya, [0.1, 0.1])
    ya_i = y0[0]
    ya_j = y0[1]
    ##defines 2 possible positions of ya_ax##
    ya_ax_1 = sp.array([ya_i, ya_j, ya_k])
    ya_ax_2 = sp.array([ya_i, -ya_j, ya_k])

    ##determines the appropriate ya_ax that produces a theoretical z-accel axis consistent with the sign of za##
    za_k = za_coneh
    za_ax_t = sp.cross(xa_ax, ya_ax_1)
    if (za_ax_t[2] + 1)/(1 + abs(za_ax_t[2])) == (1 + za_k)/(1 + abs(za_k)):
        ya_ax = ya_ax_1
    else:
        ya_ax = ya_ax_2
        za_ax_t = sp.cross(xa_ax, ya_ax_2)

    ##determines position of z-accel axis (minimum value) from xa_ax and ya_ax using dot product function##
    ##za_ax must be perpendicular to both xa_ax and ya_ax##
    za_k = za_coneh
    ##defines system of three equations##
    gza = lambda z: [(xa_ax[0]*z[0] + xa_ax[1]*z[1] + xa_ax[2]*za_k),   ##equation of dot product of xa and za = 0##
                     (ya_ax[0]*z[0] + ya_ax[1]*z[1] + ya_ax[2]*za_k),   ##equation of dot product of ya and za = 0##
                     (z[0]**2 + z[1]**2 + za_k**2 - 1)]                 ##equation of cone rim##
    ##solving for z[0] and z[1]##
    z0, d, e, f = scipy.optimize.fsolve(gza, [0.1, 0.1, 0.1], full_output=1)
    za_i = z0[0]
    za_j = z0[1]
    za_ax = sp.array([za_i, za_j, za_k])

    ##checking the dot products of xa_ax, ya_ax, za_ax##
    if abs(sp.dot(xa_ax, ya_ax)) > threshold_dot_prod or abs(sp.dot(ya_ax, za_ax)) > threshold_dot_prod or abs(sp.dot(za_ax, xa_ax)) > threshold_dot_prod:
        filter = 0

    if print_output_text == 1:
        np.set_printoptions(precision=2, suppress=True)
        print "xa: ", xa_ax, round(sqrt(sum(i**2 for i in xa_ax)), 4)
        print "ya: ", ya_ax, round(sqrt(sum(i**2 for i in ya_ax)), 4), round(sp.dot(xa_ax, ya_ax), 4)
        print "za_t:", za_ax_t, round(sqrt(sum(i**2 for i in za_ax_t)), 4), round(sp.dot(xa_ax, za_ax_t), 4), round(sp.dot(ya_ax, za_ax_t), 4)
        print "za: ", za_ax, round(sqrt(sum(i**2 for i in za_ax)), 4), round(sp.dot(xa_ax, za_ax), 4), round(sp.dot(ya_ax, za_ax), 4), round(sp.dot(za_ax_t, za_ax), 4)
        print abs(sp.dot(xa_ax, ya_ax)), abs(sp.dot(ya_ax, za_ax)), abs(sp.dot(za_ax, xa_ax)), filter
    ##END OF MUTUAL ORTHOGONALITY CHECK

    return filter
def get_domain_area(nodes, tris):
    # calculate the area in the triangles
    u = nodes[:, tris[:, 1]] - nodes[:, tris[:, 0]]
    v = nodes[:, tris[:, 2]] - nodes[:, tris[:, 0]]
    area = 1.0 / 2.0 * cross(u, v, axis=0).sum()
    return area
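# Small standalone check of the half-cross-product area sum used above (illustrative
# only, plain NumPy): a unit square split into two triangles has total area 1.0.
# Nodes are column vectors here, matching the nodes[:, index] layout of the function.
import numpy as np
nodes = np.array([[0., 1., 1., 0.],
                  [0., 0., 1., 1.]])
tris = np.array([[0, 1, 2], [0, 2, 3]])
u = nodes[:, tris[:, 1]] - nodes[:, tris[:, 0]]
v = nodes[:, tris[:, 2]] - nodes[:, tris[:, 0]]
print(0.5 * np.cross(u, v, axis=0).sum())   # 1.0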