def test_distance():
    # First, let's test some distances that are easy to figure out
    # without any spherical trig.
    eq1 = galsim.CelestialCoord(0. * galsim.radians, 0. * galsim.radians)  # point on the equator
    eq2 = galsim.CelestialCoord(1. * galsim.radians, 0. * galsim.radians)  # 1 radian along equator
    eq3 = galsim.CelestialCoord(pi * galsim.radians, 0. * galsim.radians)  # antipode of eq1
    north_pole = galsim.CelestialCoord(0. * galsim.radians, pi/2. * galsim.radians)  # north pole
    south_pole = galsim.CelestialCoord(0. * galsim.radians, -pi/2. * galsim.radians)  # south pole

    numpy.testing.assert_almost_equal(eq1.distanceTo(eq2).rad(), 1.)
    numpy.testing.assert_almost_equal(eq2.distanceTo(eq1).rad(), 1.)
    numpy.testing.assert_almost_equal(eq1.distanceTo(eq3).rad(), pi)
    numpy.testing.assert_almost_equal(eq2.distanceTo(eq3).rad(), pi-1.)

    numpy.testing.assert_almost_equal(north_pole.distanceTo(south_pole).rad(), pi)

    numpy.testing.assert_almost_equal(eq1.distanceTo(north_pole).rad(), pi/2.)
    numpy.testing.assert_almost_equal(eq2.distanceTo(north_pole).rad(), pi/2.)
    numpy.testing.assert_almost_equal(eq3.distanceTo(north_pole).rad(), pi/2.)
    numpy.testing.assert_almost_equal(eq1.distanceTo(south_pole).rad(), pi/2.)
    numpy.testing.assert_almost_equal(eq2.distanceTo(south_pole).rad(), pi/2.)
    numpy.testing.assert_almost_equal(eq3.distanceTo(south_pole).rad(), pi/2.)

    c1 = galsim.CelestialCoord(0.234 * galsim.radians, 0.342 * galsim.radians)  # Some random point
    c2 = galsim.CelestialCoord(0.234 * galsim.radians, -1.093 * galsim.radians)  # Same meridian
    c3 = galsim.CelestialCoord((pi + 0.234) * galsim.radians, -0.342 * galsim.radians)  # Antipode
    c4 = galsim.CelestialCoord((pi + 0.234) * galsim.radians, 0.832 * galsim.radians)  # Different point on opposite meridian

    numpy.testing.assert_almost_equal(c1.distanceTo(c1).rad(), 0.)
    numpy.testing.assert_almost_equal(c1.distanceTo(c2).rad(), 1.435)
    numpy.testing.assert_almost_equal(c1.distanceTo(c3).rad(), pi)
    numpy.testing.assert_almost_equal(c1.distanceTo(c4).rad(), pi-1.174)

    # Now some that require spherical trig calculations.
    # Importantly, this uses the more straightforward spherical trig formula, the cosine rule.
    # The CelestialCoord class uses a different formula that is more stable for very small
    # distances, which are typical in the correlation function calculation.
    c5 = galsim.CelestialCoord(1.832 * galsim.radians, -0.723 * galsim.radians)  # Some other random point

    # The standard formula is:
    # cos(d) = sin(dec1) sin(dec2) + cos(dec1) cos(dec2) cos(delta ra)
    d = arccos(sin(0.342) * sin(-0.723) + cos(0.342) * cos(-0.723) * cos(1.832 - 0.234))
    numpy.testing.assert_almost_equal(c1.distanceTo(c5).rad(), d)

    # Tiny displacements should have dsq = (dra^2 cos^2 dec) + (ddec^2)
    c6 = galsim.CelestialCoord((0.234 + 1.7e-9) * galsim.radians, 0.342 * galsim.radians)
    c7 = galsim.CelestialCoord(0.234 * galsim.radians, (0.342 + 1.9e-9) * galsim.radians)
    c8 = galsim.CelestialCoord((0.234 + 2.3e-9) * galsim.radians, (0.342 + 1.2e-9) * galsim.radians)

    # Note that the standard formula gets these wrong.  d comes back as 0.
    d = arccos(sin(0.342) * sin(0.342) + cos(0.342) * cos(0.342) * cos(1.7e-9))
    print 'd(c6) = ', 1.7e-9 * cos(0.342), c1.distanceTo(c6), d
    d = arccos(sin(0.342) * sin(0.342 + 1.9e-9) + cos(0.342) * cos(0.342 + 1.9e-9) * cos(0.))
    print 'd(c7) = ', 1.9e-9, c1.distanceTo(c7), d
    d = arccos(sin(0.342) * sin(0.342) + cos(0.342) * cos(0.342) * cos(1.2e-9))
    true_d = sqrt((2.3e-9 * cos(0.342))**2 + 1.2e-9**2)
    print 'd(c8) = ', true_d, c1.distanceTo(c8), d
    numpy.testing.assert_almost_equal(c1.distanceTo(c6).rad() / (1.7e-9 * cos(0.342)), 1.0)
    numpy.testing.assert_almost_equal(c1.distanceTo(c7).rad() / 1.9e-9, 1.0)
    numpy.testing.assert_almost_equal(c1.distanceTo(c8).rad() / true_d, 1.0)
def match(self):
    """For each descriptor in the first image, find the corresponding point in the second image.
    Input: desc1 (descriptors of the first image), desc2 (descriptors of the second image)."""
    if self._image_1.get_sift_descriptors() is None:
        self._image_1.make_sift_feature()
    desc1 = numpy.array([d / numpy.linalg.norm(d) for d in self._image_1.get_sift_descriptors()])
    if self._image_2.get_sift_descriptors() is None:
        self._image_2.make_sift_feature()
    desc2 = numpy.array([d / numpy.linalg.norm(d) for d in self._image_2.get_sift_descriptors()])

    dist_ratio = 0.6
    desc1_size = desc1.shape
    matchscores = numpy.zeros(desc1_size[0], 'int')
    desc2t = desc2.T  # precompute the transposed matrix
    for i in range(desc1_size[0]):
        dotprods = numpy.dot(desc1[i, :], desc2t)  # vector of dot products
        dotprods = 0.9999 * dotprods
        # Take the inverse cosine of the angles to the second image's feature points,
        # sort them, and return the indices.
        indx = numpy.argsort(numpy.arccos(dotprods))
        # Is the angle to the nearest neighbor less than dist_ratio times
        # the angle to the second nearest?
        if numpy.arccos(dotprods)[indx[0]] < dist_ratio * numpy.arccos(dotprods)[indx[1]]:
            matchscores[i] = int(indx[0])
    self._match_score = matchscores
def rotation_matrix(a1, a2, b1, b2):
    """Returns a rotation matrix that rotates the vectors *a1* in the
    direction of *a2* and *b1* in the direction of *b2*.

    In the case that the angle between *a2* and *b2* is not the same
    as between *a1* and *b1*, a proper rotation matrix will anyway be
    constructed by first rotating *b2* in the *b1*, *b2* plane.
    """
    a1 = np.asarray(a1, dtype=float) / np.linalg.norm(a1)
    b1 = np.asarray(b1, dtype=float) / np.linalg.norm(b1)
    c1 = np.cross(a1, b1)
    c1 /= np.linalg.norm(c1)  # clean out rounding errors...

    a2 = np.asarray(a2, dtype=float) / np.linalg.norm(a2)
    b2 = np.asarray(b2, dtype=float) / np.linalg.norm(b2)
    c2 = np.cross(a2, b2)
    c2 /= np.linalg.norm(c2)  # clean out rounding errors...

    # Calculate rotated *b2*
    theta = np.arccos(np.dot(a2, b2)) - np.arccos(np.dot(a1, b1))
    b3 = np.sin(theta) * a2 + np.cos(theta) * b2
    b3 /= np.linalg.norm(b3)  # clean out rounding errors...

    A1 = np.array([a1, b1, c1])
    A2 = np.array([a2, b3, c2])
    R = np.linalg.solve(A1, A2).T
    return R
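# Usage sketch for rotation_matrix above (a minimal check, not part of the
# original code; assumes only that rotation_matrix and numpy as np are in
# scope): mapping the x-axis onto the y-axis and the y-axis onto -x should
# produce a 90-degree rotation about z.
R90 = rotation_matrix([1, 0, 0], [0, 1, 0], [0, 1, 0], [-1, 0, 0])
assert np.allclose(np.dot(R90, [1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
assert np.allclose(np.dot(R90, [0.0, 1.0, 0.0]), [-1.0, 0.0, 0.0])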
def clockwise_angle(x, y, r=None, r0=None):
    # angle between two vectors defined by three points,
    # (x0,y0), (x1,y1), (x2,y2)
    x1 = x[0] - x[1]
    x2 = x[2] - x[1]
    y1 = y[0] - y[1]
    y2 = y[2] - y[1]

    dot = x1*x2 + y1*y2
    det = x1*y2 - y1*x2
    angle = np.arctan2(det, dot)
    if angle < 0:
        angle = -angle
    else:
        angle = 2*np.pi - angle

    if r and r == r0:  # may not make sense if r~=r0
        L = np.sqrt(x2**2 + y2**2)
        a = np.arccos(L/r)
        angle = angle - a
        L0 = np.sqrt(x1**2 + y1**2)
        if L0 < r0:
            a0 = np.arccos(L0/r0)
            angle = angle - a0

    return angle
def get_new_cell(self): """Returns new basis vectors""" a = np.sqrt(self.a) b = np.sqrt(self.b) c = np.sqrt(self.c) ad = self.atoms.cell[0] / np.linalg.norm(self.atoms.cell[0]) Z = np.cross(self.atoms.cell[0], self.atoms.cell[1]) Z /= np.linalg.norm(Z) X = ad - np.dot(ad, Z) * Z X /= np.linalg.norm(X) Y = np.cross(Z, X) alpha = np.arccos(self.x / (2 * b * c)) beta = np.arccos(self.y / (2 * a * c)) gamma = np.arccos(self.z / (2 * a * b)) va = a * np.array([1, 0, 0]) vb = b * np.array([np.cos(gamma), np.sin(gamma), 0]) cx = np.cos(beta) cy = (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) \ / np.sin(gamma) cz = np.sqrt(1. - cx * cx - cy * cy) vc = c * np.array([cx, cy, cz]) abc = np.vstack((va, vb, vc)) T = np.vstack((X, Y, Z)) return np.dot(abc, T)
def rotation_params(r0, r1, r2):
    r10 = [a - b for a, b in zip(r1, r0)]
    r21 = [a - b for a, b in zip(r2, r1)]
    # print('r10 is ' + str(r10))
    # print('r21 is ' + str(r21))
    # angle between r10 and r21
    # print('arg to arccos is ' + str(dot(r21, r10)/(norm(r21)*norm(r10))))
    arg = dot(r21, r10) / (norm(r21) * norm(r10))
    if (norm(r21) * norm(r10) > 1e-16):
        if arg < 0:
            theta = 180 * arccos(max(-1, arg)) / pi
        else:
            theta = 180 * arccos(min(1, arg)) / pi
    else:
        theta = 0.0
    # get normal vector to plane r0 r1 r2
    u = cross(r21, r10)
    # check for collinear case
    if norm(u) < 1e-16:
        # pick random perpendicular vector
        if (abs(r21[0]) > 1e-16):
            u = [(-r21[1] - r21[2]) / r21[0], 1, 1]
        elif (abs(r21[1]) > 1e-16):
            u = [1, (-r21[0] - r21[2]) / r21[1], 1]
        elif (abs(r21[2]) > 1e-16):
            u = [1, 1, (-r21[0] - r21[1]) / r21[2]]
    return theta, u
def solve_nonlinear(self, params, unknowns, resids):
    x = params['xr']
    y = params['yr']
    z = params['z']
    r = params['r']
    alpha = params['alpha']

    nTurbs = len(x)
    overlap_fraction = np.eye(nTurbs)
    for i in range(nTurbs):
        for j in range(nTurbs):
            # overlap_fraction[i][j] is the fraction of the area of turbine i
            # in the wake from turbine j
            dx = x[i] - x[j]
            dy = abs(y[i] - y[j])
            dz = abs(z[i] - z[j])
            d = np.sqrt(dy**2 + dz**2)
            R = r[j] + dx*alpha
            A = r[i]**2*np.pi
            overlap_area = 0
            if dx <= 0:  # if turbine i is in front of turbine j
                overlap_fraction[i][j] = 0.0
            else:
                if d <= R - r[i]:  # if turbine i is completely in the wake of turbine j
                    if A <= np.pi*R**2:  # if the area of turbine i is smaller than the wake from turbine j
                        overlap_fraction[i][j] = 1.0
                    else:  # if the area of turbine i is larger than the wake from turbine j
                        overlap_fraction[i][j] = np.pi*R**2/A
                elif d >= R + r[i]:  # if turbine i is completely out of the wake
                    overlap_fraction[i][j] = 0.0
                else:  # if turbine i overlaps partially with the wake
                    overlap_area = r[i]**2.*np.arccos((d**2. + r[i]**2. - R**2.)/(2.0*d*r[i])) \
                        + R**2.*np.arccos((d**2. + R**2. - r[i]**2.)/(2.0*d*R)) \
                        - 0.5*np.sqrt((-d + r[i] + R)*(d + r[i] - R)*(d - r[i] + R)*(d + r[i] + R))
                    overlap_fraction[i][j] = overlap_area/A

    print "Overlap Fraction Matrix: ", overlap_fraction
    unknowns['overlap'] = overlap_fraction
def _compute_static_prob(tri, com):
    """
    For an object with the given center of mass, compute
    the probability that the given tri would be the first to hit the
    ground if the object were dropped with a pose chosen uniformly at random.

    Parameters
    ----------
    tri: (3,3) float, the vertices of a triangle
    com: (3,) float, the center of mass of the object

    Returns
    -------
    prob: float, the probability in [0,1] for the given triangle
    """
    sv = [(v - com) / np.linalg.norm(v - com) for v in tri]

    # Use L'Huilier's Formula to compute spherical area
    a = np.arccos(min(1, max(-1, np.dot(sv[0], sv[1]))))
    b = np.arccos(min(1, max(-1, np.dot(sv[1], sv[2]))))
    c = np.arccos(min(1, max(-1, np.dot(sv[2], sv[0]))))
    s = (a + b + c) / 2.0

    # Prevents weirdness with arctan
    try:
        return 1.0 / np.pi * np.arctan(np.sqrt(np.tan(s / 2) * np.tan(
            (s - a) / 2) * np.tan((s - b) / 2) * np.tan((s - c) / 2)))
    except BaseException:
        s = s + 1e-8
        return 1.0 / np.pi * np.arctan(np.sqrt(np.tan(s / 2) * np.tan(
            (s - a) / 2) * np.tan((s - b) / 2) * np.tan((s - c) / 2)))
def star(a, b, c, alpha, beta, gamma):
    "Calculate unit cell volume, reciprocal cell volume, reciprocal lattice parameters"
    alpha = np.radians(alpha)
    beta = np.radians(beta)
    gamma = np.radians(gamma)
    V = 2*a*b*c*np.sqrt(np.sin((alpha + beta + gamma)/2) *
                        np.sin((-alpha + beta + gamma)/2) *
                        np.sin((alpha - beta + gamma)/2) *
                        np.sin((alpha + beta - gamma)/2))
    Vstar = (2*np.pi)**3/V
    astar = 2*np.pi*b*c*np.sin(alpha)/V
    bstar = 2*np.pi*a*c*np.sin(beta)/V
    cstar = 2*np.pi*a*b*np.sin(gamma)/V
    alphastar = np.arccos((np.cos(beta)*np.cos(gamma) - np.cos(alpha)) /
                          (np.sin(beta)*np.sin(gamma)))
    betastar = np.arccos((np.cos(alpha)*np.cos(gamma) - np.cos(beta)) /
                         (np.sin(alpha)*np.sin(gamma)))
    gammastar = np.arccos((np.cos(alpha)*np.cos(beta) - np.cos(gamma)) /
                          (np.sin(alpha)*np.sin(beta)))
    alphastar = np.degrees(alphastar)
    betastar = np.degrees(betastar)
    gammastar = np.degrees(gammastar)
    return astar, bstar, cstar, alphastar, betastar, gammastar
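# Quick sanity check of star() above (not part of the original code; assumes
# star() and numpy as np are in scope, as the function itself requires): for a
# cubic cell with a = b = c = 4 and all angles 90 degrees, the reciprocal
# lengths should be 2*pi/4 and the reciprocal angles should stay at 90 degrees.
astar, bstar, cstar, alphastar, betastar, gammastar = star(4.0, 4.0, 4.0, 90.0, 90.0, 90.0)
assert np.allclose([astar, bstar, cstar], 2 * np.pi / 4.0)
assert np.allclose([alphastar, betastar, gammastar], 90.0)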
def calculate_couplings_levine(dt: float, w_jk: Matrix, w_kj: Matrix) -> Matrix: """ Compute the non-adiabatic coupling according to: `Evaluation of the Time-Derivative Coupling for Accurate Electronic State Transition Probabilities from Numerical Simulations`. Garrett A. Meek and Benjamin G. Levine. dx.doi.org/10.1021/jz5009449 | J. Phys. Chem. Lett. 2014, 5, 2351−2356 """ # Orthonormalize the Overlap matrices w_jk = np.linalg.qr(w_jk)[0] w_kj = np.linalg.qr(w_kj)[0] # Diagonal matrix w_jj = np.diag(np.diag(w_jk)) w_kk = np.diag(np.diag(w_kj)) # remove the values from the diagonal np.fill_diagonal(w_jk, 0) np.fill_diagonal(w_kj, 0) # Components A + B acos_w_jj = np.arccos(w_jj) asin_w_jk = np.arcsin(w_jk) a = acos_w_jj - asin_w_jk b = acos_w_jj + asin_w_jk A = - np.sin(np.sinc(a)) B = np.sin(np.sinc(b)) # Components C + D acos_w_kk = np.arccos(w_kk) asin_w_kj = np.arcsin(w_kj) c = acos_w_kk - asin_w_kj d = acos_w_kk + asin_w_kj C = np.sin(np.sinc(c)) D = np.sin(np.sinc(d)) # Components E w_lj = np.sqrt(1 - (w_jj ** 2) - (w_kj ** 2)) w_lk = -(w_jk * w_jj + w_kk * w_kj) / w_lj asin_w_lj = np.arcsin(w_lj) asin_w_lk = np.arcsin(w_lk) asin_w_lj2 = asin_w_lj ** 2 asin_w_lk2 = asin_w_lk ** 2 t1 = w_lj * w_lk * asin_w_lj x1 = np.sqrt((1 - w_lj ** 2) * (1 - w_lk ** 2)) - 1 t2 = x1 * asin_w_lk t = t1 + t2 E_nonzero = 2 * asin_w_lj * t / (asin_w_lj2 - asin_w_lk2) # Check whether w_lj is different of zero E1 = np.where(np.abs(w_lj) > 1e-8, E_nonzero, np.zeros(A.shape)) E = np.where(np.isclose(asin_w_lj2, asin_w_lk2), w_lj ** 2, E1) cte = 1 / (2 * dt) return cte * (np.arccos(w_jj) * (A + B) + np.arcsin(w_kj) * (C + D) + E)
def test_02_08_three(self): '''Test the angles between three objects''' labels = np.zeros((10,10),int) labels[2,2] = 1 # x=3,y=4,5 triangle labels[2,5] = 2 labels[6,2] = 3 workspace, module = self.make_workspace(labels, M.D_WITHIN, 5) module.run(workspace) m = workspace.measurements fo = m.get_current_measurement(OBJECTS_NAME, "Neighbors_FirstClosestObjectNumber_5") self.assertEqual(len(fo),3) self.assertEqual(fo[0],2) self.assertEqual(fo[1],1) self.assertEqual(fo[2],1) so = m.get_current_measurement(OBJECTS_NAME, "Neighbors_SecondClosestObjectNumber_5") self.assertEqual(len(so),3) self.assertEqual(so[0],3) self.assertEqual(so[1],3) self.assertEqual(so[2],2) d = m.get_current_measurement(OBJECTS_NAME, "Neighbors_SecondClosestDistance_5") self.assertEqual(len(d),3) self.assertAlmostEqual(d[0],4) self.assertAlmostEqual(d[1],5) self.assertAlmostEqual(d[2],5) angle = m.get_current_measurement(OBJECTS_NAME, "Neighbors_AngleBetweenNeighbors_5") self.assertEqual(len(angle),3) self.assertAlmostEqual(angle[0],90) self.assertAlmostEqual(angle[1],np.arccos(3.0/5.0) * 180.0 / np.pi) self.assertAlmostEqual(angle[2],np.arccos(4.0/5.0) * 180.0 / np.pi)
def make_cluster(n):
    '''
    Make n particles in sphere with distribution given by Plummer model.
    '''
    particles = []
    for i in range(n):
        mass = 1.0 / n  # total mass of system normalised to 1
        radius = 1.0 / N.sqrt(R.random() ** (-2.0/3.0) - 1.0)
        theta = R.uniform(0, 2*N.pi)
        phi = N.arccos(R.uniform(-1, 1))
        x = radius * N.cos(theta) * N.sin(phi)
        y = radius * N.sin(theta) * N.sin(phi)
        z = radius * N.cos(phi)
        pos = N.array((x, y, z))

        # von Neumann's rejection technique
        a = 0.0
        b = 0.1
        while b > a*a * (1.0 - a*a)**3.5:
            a = R.uniform(0, 1)
            b = R.uniform(0, 0.1)

        velocity = a * N.sqrt(2.0) * (1.0 + radius*radius)**(-0.25)
        theta = R.uniform(0, 2*N.pi)
        phi = N.arccos(R.uniform(-1, 1))
        vx = velocity * N.cos(theta) * N.sin(phi)
        vy = velocity * N.sin(theta) * N.sin(phi)
        vz = velocity * N.cos(phi)
        vel = N.array((vx, vy, vz))

        p = Particle(mass, pos, vel)
        particles.append(p)

    return particles
def spherical_excess(a, b, c):
    "Spherical excess of the triangle with side arc lengths a, b, c (in radians)."
    A = arccos((cos(a) - cos(b) * cos(c)) / sin(b) / sin(c))
    B = arccos((cos(b) - cos(c) * cos(a)) / sin(c) / sin(a))
    C = arccos((cos(c) - cos(a) * cos(b)) / sin(a) / sin(b))
    E = A + B + C - pi
    return E
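# Minimal sanity check of spherical_excess above (not part of the original
# code; assumes the same bare arccos, cos, sin, pi names the function uses,
# e.g. from numpy): the octant triangle with a = b = c = pi/2 has three right
# angles, so its excess is 3*(pi/2) - pi = pi/2, one eighth of the sphere's
# 4*pi of solid angle.
assert abs(spherical_excess(pi / 2, pi / 2, pi / 2) - pi / 2) < 1e-12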
def draw_cell_2d(axis, cell_output, total_radius=True, zorder=0, y_limits=None, alpha=1.0): """ """ (x, y, z) = cell_output.get_location() rad = cell_output.get_radius(total_radius=total_radius) if cell_output.color == None: print 'Cell has no defined color!' col = (0, 1, 0) else: col = cell_output.color #col = (0, 1, 0) if cell_output.color == None else cell_output.color #col = cell_output.color if (y_limits != None) and (y - rad < y_limits[0]): segment = toolbox_schematic.CircleSegment() segment.set_defaults(alpha=alpha, edgecolor='none', facecolor=col, zorder=zorder) angle = pi - numpy.arccos((y - y_limits[0])/rad) segment.set_points((y, x), rad, [angle, -angle]) segment.draw(axis) segment.set_points((y - y_limits[0] + y_limits[1], x), rad, [angle, 2*pi-angle]) segment.draw(axis) elif (y_limits != None) and (y + rad > y_limits[1]): segment = toolbox_schematic.CircleSegment() segment.set_defaults(alpha=alpha, edgecolor='none', facecolor=col, zorder=zorder) angle = numpy.arccos((y_limits[1] - y)/rad) segment.set_points((y, x), rad, [angle, 2*pi-angle]) segment.draw(axis) segment.set_points((y + y_limits[0] - y_limits[1], x), rad, [-angle, angle]) segment.draw(axis) else: circle = toolbox_schematic.Circle() circle.set_defaults(alpha=alpha, edgecolor='none', facecolor=col, zorder=zorder) circle.set_points((y, x), rad) circle.draw(axis)
def Jacobsen(h1, Xm_1, h2, Xm_2):
    alp = np.degrees(np.arccos(np.dot(h1, h2) /
                               (np.linalg.norm(h1) * np.linalg.norm(h2))))
    bet = np.degrees(np.arccos(np.dot(Xm_1, Xm_2) /
                               (np.linalg.norm(Xm_1) * np.linalg.norm(Xm_2))))
    if ((alp - bet)**2) > 1:
        print('check your indexing!')
    a = 3.567  # diamond lattice parameter
    # reciprocal lattice parameter (note: as written this includes the factor of 2*pi)
    ast = (2 * np.pi) / a
    B = np.array([[ast, 0, 0],
                  [0, ast, 0],
                  [0, 0, ast]])
    Xm_g = np.cross(Xm_1, Xm_2)
    Xm = np.column_stack([Xm_1, Xm_2, Xm_g])
    # Vector Q1 is described in reciprocal space by its coordinate matrix h1
    Xa_1 = B.dot(h1)
    Xa_2 = B.dot(h2)
    Xa_g = np.cross(Xa_1, Xa_2)
    Xa = np.column_stack((Xa_1, Xa_2, Xa_g))
    R = Xa.dot(np.linalg.inv(Xm))
    U = np.linalg.inv(R)
    UB = U.dot(B)
    return UB
def forward(self, lons, lats, az, dist, radians=False): """ Forward geodetic problem from a point """ if not radians: lons = np.array(lons) * pi / 180.0 lats = np.array(lats) * pi / 180.0 az = np.array(az) * pi / 180.0 d_ = dist / self.radius lats2 = np.arcsin(np.sin(lats) * np.cos(d_) + np.cos(lats) * np.sin(d_) * np.cos(az)) dlons = np.arccos((np.cos(d_) - np.sin(lats2) * np.sin(lats)) / (np.cos(lats) * np.cos(lats2))) baz = np.arccos((np.sin(lats) - np.cos(d_) * np.sin(lats2)) / (np.sin(d_) * np.cos(lats2))) if 0 <= az < pi: lons2 = lons + dlons baz = -baz elif pi <= az < 2*pi: lons2 = lons - dlons else: raise ValueError("azimuth should be [0, 2pi)") baz = geodesy.unroll_rad(baz) if not radians: lons2 = np.array(lons2) * 180 / pi lats2 = np.array(lats2) * 180 / pi baz = np.array(baz) * 180 / pi return lons2, lats2, baz
def main(): map_obstacles = np.loadtxt('obstacles_map.txt') laser_obstacles = np.loadtxt('obstacles_laser.txt') true_rotation = np.pi / 10 true_translation = np.array([5, 5]) laser_rot = rotate(laser_obstacles, true_rotation) laser_trans = translate(laser_rot, true_translation) t, r = relocalize(map_obstacles, laser_rot) theta = np.arccos(r.item(0, 1)) print "True Rotation:", true_rotation print "True Translation:", true_translation print "-------------------------------------" print "Estimated Rotation:", theta print "Estimated Translation:", t print "-------------------------------------" print "Rotation Error:", np.abs(true_rotation - theta) print "Translation Error:", np.abs(true_translation - t) laser_reloc = rotate(laser_rot, -np.arccos(r.item(0, 1))) plot_super(map_obstacles, laser_obstacles, "Original") plot_super(map_obstacles, laser_trans, "Measure misaligned with map") plot_super(map_obstacles, laser_reloc, "Measure realigned with map") plt.show()
def uniform_random_ellipsoid5d(Npts, r1, r2, r3, r4, r5): """ 5D case of uniform_random_ellipsoid """ r = np.random.rand(Npts) ph = np.random.rand(Npts) * 2.*np.pi costh1 = np.random.rand(Npts)*2.-1. costh2 = np.random.rand(Npts)*2.-1. costh3 = np.random.rand(Npts)*2.-1. sinth1 = np.sqrt(1.-costh1*costh1) sinth2 = np.sqrt(1.-costh2*costh2) sinth3 = np.sqrt(1.-costh3*costh3) th1 = np.arccos(costh1) th2 = np.arccos(costh2) th3 = np.arccos(costh3) rrt = r**(1./5.) x1 = r1 * rrt * sinth1 * sinth2 * sinth3 * np.cos(ph) x2 = r2 * rrt * sinth1 * sinth2 * sinth3 * np.sin(ph) x3 = r3 * rrt * sinth1 * sinth2 * costh3 x4 = r4 * rrt * sinth1 * costh2 x5 = r5 * rrt * costh1 cart_pts = np.transpose((x1,x2,x3,x4,x5)) sph_pts = np.transpose((rrt,th1,th2,th3,ph)) origin = np.array([[0.,0.,0.,0.,0.]]) # Always put a pt at ellipse center cart_pts = np.append(origin, cart_pts, axis=0) sph_pts = np.append(origin, sph_pts, axis=0) return cart_pts, sph_pts
def qea(im): H = ss.hilbert(im,axis = 2) H = im+1j*H ia = np.abs(H) ip = np.angle(H) h1col = H[1:-1,:,:] h0col = H[:-2,:,:] h2col = H[2:,:,:] ifColSign = np.sign(np.real((h0col-h2col)/(2j*h1col))) ifCol = np.arccos((h2col+h0col)/(2*h1col)) ifCol = (np.abs(ifCol)*ifColSign)/np.pi/2 ifCol = np.pad(ifCol,((1,1),(0,0),(0,0)), mode='reflect') h0row = H[:,:-2,:] h1row = H[:,1:-1,:] h2row = H[:,2:,:] #ifxSign = np.sign(np.real((h2x-h0x)/(2j*h1x))) ifRow = np.arccos((h2row+h0row)/(2*h1row)) ifRow = (np.abs(ifRow))/np.pi/2 ifRow = np.pad(ifRow,((0,0),(1,1),(0,0)), mode='reflect') h0time = H[:,:,:-2] h1time = H[:,:,1:-1] h2time = H[:,:,2:] #ifxSign = np.sign(np.real((h2x-h0x)/(2j*h1x))) ifTime = np.arccos((h2time+h0time)/(2*h1time)) ifTime = (np.abs(ifTime))/np.pi/2 ifTime = np.pad(ifTime,((0,0),(0,0),(1,1)), mode='reflect') return(ia,ip,ifRow,ifCol,ifTime)
def _get_lw(box): p0 = box[0] p1 = box[1] vec1 = np.array(box[2] - p0) vec1 = vec1 / np.linalg.norm(vec1) vec2 = np.array(p1 - p0) vec2 = vec2 / np.linalg.norm(vec2) vec3 = np.array(box[3] - p0) vec3 = vec3 / np.linalg.norm(vec3) ang1 = np.arccos((vec1).dot(vec2)) ang2 = np.arccos((vec3).dot(vec2)) dif1 = 1.5708 - ang1 dif2 = 1.5708 - ang2 if dif1 < dif2: p2 = box[2] else: p2 = box[3] l, lp = np.linalg.norm(abs(p1 - p0)), p1 w, wp = np.linalg.norm(abs(p2 - p0)), p2 if l < w: temp = w templ = wp w = l wp = lp l = temp lp = templ direc = (wp - p0) / np.linalg.norm(wp - p0) dot = direc.dot(np.array([0, 1])) vcost = abs(dot) return l, w, vcost
def cart2sph(x, y, z):
    """
    Cartesian coordinates to spherical polar coordinates
    Returns r, fi (azimuth), teta (inclination, not elevation, i.e. not latitude)
    """
    if not isarray(x):
        x = np.array([x])
        y = np.array([y])
        z = np.array([z])

    r = 0.0 * x
    teta = 0.0 * x
    fi = 0.0 * x

    r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    teta[r != 0] = np.arccos(z[r != 0] / r[r != 0])
    rr = np.sqrt(x ** 2 + y ** 2)
    c1 = (y >= 0) & (rr != 0)
    c2 = (y < 0) & (rr != 0)
    fi[c1] = np.arccos(x[c1] / rr[c1])  # y>=0 & rr!=0
    fi[c2] = 2 * np.pi - np.arccos(x[c2] / rr[c2])  # y<0 & rr!=0
    return r, fi, teta
def PlotEccOrbit_aRs(par,t): """Function to plot planet orbit in 3D""" #read in parameters T0,P,a_Rstar,p,b,c1,c2,e,w,foot,Tgrad,Sec_depth = par #ensure b and p >= 0 if b<0.: b=-b if p<0.: p=-p w *= np.pi / 180. i = np.arccos(b/a_Rstar) #make w lie in range 0-2pi if w >= 2*np.pi: w -= 2*np.pi #make f lie in range 0-2pi elif w < 0: w += 2*np.pi #make f lie in range 0-2pi #true anomaly of central transit time f = 1.*np.pi/2. + w if f >= 2*np.pi: f -= 2*np.pi #make f lie in range 0-2pi elif f < 0: f += 2*np.pi #make f lie in range 0-2pi if f < np.pi: E = np.arccos( (np.cos(f) + e) / (e*np.cos(f)+1.) ) M_tr = E - e*np.sin(E) T_peri = T0 + M_tr * P/(2*np.pi) if f >= np.pi: #f = np.pi - f #correct for acos calc E = np.arccos( (np.cos(f) + e) / (e*np.cos(f)+1.) ) M_tr = E - e*np.sin(E) #M_tr = 2*np.pi - M_tr T_peri = T0 - M_tr * P/(2*np.pi) #calculate mean anomaly M = (2*np.pi/P) * (t - T_peri) #get coords x = PlanetOrbit.get_x(M,a_Rstar,e,w) y = PlanetOrbit.get_y(M,a_Rstar,e,w,i) z = PlanetOrbit.get_z(M,a_Rstar,e,w,i) #make plot ax = Axes3D(pylab.gcf()) ax.plot(x, y, z, c='k') ax.scatter(x, y, z, c='r', s=50) ax.scatter([0],[0],[0],c='y', s=500) #plot star position ax.scatter([x[0],],[y[0],],[z[0],],c='g', s=100) #plot initial planet position ax.scatter([x[1],],[y[1],],[z[1],],c='y', s=100) #plot initial planet position ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') range = abs(np.array([x,y,z])).max() ax.set_xlim3d(-range,range) ax.set_ylim3d(-range,range) ax.set_zlim3d(-range,range)
def compute_cd(self):
    # DA is the measured sensor value
    # 1) compute diagonal E (using A, D + angle DA)
    #    E = sqrt(A * A + D * D - 2 * A * D * cos(DA))
    e = numpy.sqrt(
        self.a * self.a + self.d * self.d -
        2 * self.a * self.d * cos(radians(self.da)))
    # 2) compute angle ED of triangle 1 (using A, D, E)
    #    ED = acos((E * E + D * D - A * A) / (2 * E * D))
    ed = numpy.arccos(
        (e * e + self.d * self.d - self.a * self.a) / (2 * e * self.d))
    # 3) compute angle of CE of triangle 2 (using B, C, E)
    #    CE = acos((E * E + C * C - B * B) / (2 * E * C))
    ce = numpy.arccos(
        (e * e + self.c * self.c - self.b * self.b) / (2 * e * self.c))
    # 4) add angles #2 and #3
    #    CD = CE + ED
    cd = ce + ed  # radians
    #e = numpy.degrees(cd) - self.cd
    #print("calf link angle error: %s" % e)
    # 5) compute excursion of spring using angle #4
    #    sqrt(F * F + G * G - 2 * F * G * cos(CD))
    return cd
def circle_intersection_area(r, R, d): ''' Formula from: http://mathworld.wolfram.com/Circle-CircleIntersection.html Does not make sense for negative r, R or d >>> circle_intersection_area(0.0, 0.0, 0.0) 0.0 >>> circle_intersection_area(1.0, 1.0, 0.0) 3.1415... >>> circle_intersection_area(1.0, 1.0, 1.0) 1.2283... ''' if np.abs(d) < tol: minR = np.min([r, R]) return np.pi * minR**2 if np.abs(r - 0) < tol or np.abs(R - 0) < tol: return 0.0 d2, r2, R2 = float(d**2), float(r**2), float(R**2) arg = (d2 + r2 - R2) / 2 / d / r arg = np.max([np.min([arg, 1.0]), -1.0]) # Even with valid arguments, the above computation may result in things like -1.001 A = r2 * np.arccos(arg) arg = (d2 + R2 - r2) / 2 / d / R arg = np.max([np.min([arg, 1.0]), -1.0]) B = R2 * np.arccos(arg) arg = (-d + r + R) * (d + r - R) * (d - r + R) * (d + r + R) arg = np.max([arg, 0]) C = -0.5 * np.sqrt(arg) return A + B + C
def calculateDiffuseRay(self, intersectionPoint, firstGeometry): pointNormal = firstGeometry.getNormal(intersectionPoint) r1 = rand.random() r2 = rand.random() phi = 2 * np.pi * r1 theta = np.arccos(np.sqrt(r2)) # To cartesian coordinates x = np.cos(phi) * np.sin(theta) y = np.sin(phi) * np.sin(theta) z = np.cos(theta) newDirection = [x, y, z] # Rotate new direction to distribution of normal vector el = -1 * np.arccos(pointNormal[2]) az = -1 * np.arctan2(pointNormal[1], pointNormal[0]) rotationRay = [np.cos(el) * newDirection[0] - np.sin(el) * newDirection[2], newDirection[1], np.sin(el) * newDirection[0] + np.cos(el) * newDirection[2]] rotationRay = [np.cos(az) * rotationRay[0] + np.sin(az) * rotationRay[1], -1 * np.sin(az) * rotationRay[0] + np.cos(az) * rotationRay[1], rotationRay[2]] newDirectionCorrect = rotationRay / np.linalg.norm(rotationRay) randomRay = Ray(newDirectionCorrect, intersectionPoint + (0.00001 * newDirectionCorrect)) return randomRay
def overlap(self, blob): """Overlap between two blobs. Defined by the overlap area. """ # For now it is just the overlap area of two containment circles # It could be replaced by the Q or C factor, which also defines # a certain neighborhood. d = sqrt((self.x_pos - blob.x_pos) ** 2 + (self.y_pos - blob.y_pos) ** 2) # One circle lies inside the other if d < abs(self.radius - blob.radius): area = pi * min(self.radius, blob.radius) ** 2 # Circles don't overlap elif d > (self.radius + blob.radius): area = 0 # Compute overlap area. # Reference: http://mathworld.wolfram.com/Circle-CircleIntersection.html (04.04.2013) else: term_a = blob.radius ** 2 * arccos((d ** 2 + blob.radius ** 2 - self.radius ** 2) / (2 * d * blob.radius)) term_b = self.radius ** 2 * arccos((d ** 2 + self.radius ** 2 - blob.radius ** 2) / (2 * d * self.radius)) term_c = 0.5 * sqrt( abs( (-d + self.radius + blob.radius) * (d + self.radius - blob.radius) * (d - self.radius + blob.radius) * (d + self.radius + blob.radius) ) ) area = term_a + term_b - term_c return max(area / self.area(), area / blob.area())
def miso((q1, q2)): q1 = quaternion.Quaternion(numpy.array(q1) / numpy.linalg.norm(q1)).conjugate() q2 = quaternion.Quaternion(numpy.array(q2) / numpy.linalg.norm(q2)).conjugate() misot = 180.0 misoa = None for i in range(len(cubicSym)): for ii in range(len(orthoSym)): qa = orthoSym[ii] * q1 * cubicSym[i] for j in range(len(cubicSym)): #for jj in range(len(orthoSym)): qb = q2 * cubicSym[j] qasb1 = qa.conjugate() * qb qasb2 = qb * qa.conjugate() t1 = qasb1.wxyz / numpy.linalg.norm(qasb1.wxyz) t2 = qasb2.wxyz / numpy.linalg.norm(qasb2.wxyz) a1 = 2 * numpy.arccos(t1[0]) * 180 / numpy.pi a2 = 2 * numpy.arccos(t2[0]) * 180 / numpy.pi if a1 < misot: misot = a1 misoa = qasb1 if a2 < misot: misot = a2 misoa = qasb2 return misot
def GetOrientTransmat(self, biounit=False, target=False): cen = np.zeros((3,1)) cenlist = self.Centroid(biounit, target) cen[0:3] = [[cenlist[0]],[cenlist[1]],[cenlist[2]]] tmat = translation_matrix(-self.Centroid(biounit, target)) max = 0 farthestxyz = None for atom in self.IterAtoms(biounit, target): dist = np.sum((atom.xyz[0:3] - cen)**2) if dist > max: farthestxyz = atom.xyz[0:3] - cen max = dist firstrotax = np.cross(farthestxyz.transpose().tolist()[0], np.array([1,0,0])) firstrotang = np.arccos(np.dot(farthestxyz.transpose().tolist()[0], np.array([1,0,0]))/(np.sqrt(np.dot(farthestxyz.transpose().tolist()[0], farthestxyz.transpose().tolist()[0])))) firstrotmat = rotation_matrix(firstrotang, firstrotax, self.Centroid(biounit, target)) max = 0 firsttransmat = tmat.dot(firstrotmat) for atom in self.IterAtoms(biounit, target): dist = sum(firsttransmat.dot(atom.xyz)[1:3]**2) if dist > max: secfarthestyz = firsttransmat.dot(atom.xyz)[0:3] max = dist secfarthestyz[0] = 0 secondrotax = np.array([1,0,0]) secondrotang = np.arccos(np.dot(secfarthestyz.transpose().tolist()[0], np.array([0,1,0]))/np.sqrt(np.dot(secfarthestyz.transpose().tolist()[0],secfarthestyz.transpose().tolist()[0]))) secondrotmat = rotation_matrix(secondrotang, secondrotax, self.Centroid(biounit, target)) return secondrotmat.dot(firsttransmat)
def get_sunset_ele(d):
    if (-np.tan(get_declination_angle(d)) * np.tan(np.deg2rad(lat))) > 1.0:
        return np.arccos(1.0)
    elif (-np.tan(get_declination_angle(d)) * np.tan(np.deg2rad(lat))) < -1.0:
        return np.arccos(-1.0)
    else:
        return np.arccos(-np.tan(get_declination_angle(d)) * np.tan(np.deg2rad(lat)))
def test_water_cost_angle_ic(): fn_xyz = context.get_fn('test/water_trajectory.xyz') system = System.from_file(fn_xyz, ffatypes=['O', 'H', 'H']) system.detect_bonds() fn_pars = context.get_fn('test/parameters_water.txt') parameters = Parameters.from_file(fn_pars) del parameters.sections['FIXQ'] del parameters.sections['DAMPDISP'] del parameters.sections['EXPREP'] refpos = np.array([ [0.0, 0.0, 0.0], [0.0, 0.0, 1.1], [0.0, 1.1, 0.0], ])*angstrom rules = [ScaleRule('BENDCHARM', 'PARS', 'H\s*O\s*H', 4)] mods = [ParameterModifier(rules)] pt = ParameterTransform(parameters, mods) simulations = [GeoOptSimulation('only', system)] tests = [ICTest(5*deg, refpos, simulations[0], BendGroup(system))] assert tests[0].icgroup.cases == [[2, 0, 1]] cost = CostFunction(pt, {'all': tests}) x = np.array([1.0]) assert abs(cost(x) - np.log(0.5*((np.arccos(2.7892000007e-02) - 1.5707963267948966)/(5*deg))**2)) < 1e-4 x = np.array([1.1]) assert abs(cost(x) - np.log(0.5*((np.arccos(1.1*2.7892000007e-02) - 1.5707963267948966)/(5*deg))**2)) < 1e-4 x = np.array([0.8]) assert abs(cost(x) - np.log(0.5*((np.arccos(0.8*2.7892000007e-02) - 1.5707963267948966)/(5*deg))**2)) < 1e-4
def findAnglesBetweenTwoVectors1(v1, v2):
    v1_u = unitVector(v1)
    v2_u = unitVector(v2)
    return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
for j in range(ny): Bot[i, j] = [bx[i, j], by[i, j]] #only note (useless) av = np.array([bx[i, j], by[i, j]]) Bot_mag[i, j] = np.linalg.norm(av) Bpt[i, j] = [bxp[i, j], byp[i, j]] #only note (useless) bv = np.array([bxp[i, j], byp[i, j]]) Bpt_mag[i, j] = np.linalg.norm(bv) cv = np.subtract(av, bv) Bnt_mag[i, j] = np.linalg.norm(cv) Brt_mag[i, j] = np.sqrt((Bot_mag[i, j]**2) + (bz[i, j]**2)) #Calculate shear angle between observed and potential field denum[i, j] = (Bot_mag[i, j] * Bpt_mag[i, j]) if denum[i, j] != 0: cos_shear[i, j] = (av @ bv) / denum[i, j] shear_ang[i, j] = np.arccos(cos_shear[i, j]) shear_deg[i, j] = np.math.degrees(shear_ang[i, j]) #Calculate proxy of the photospheric magnetic energy exc_erg[i, j] = (Bnt_mag[i, j]**2) * dAr / (8 * np.math.pi) exc_ergn = np.zeros((nx, ny), float) for i in range(nx): for j in range(ny - 1): if abs(bzn[i, j]) >= 0.1: exc_ergn[i, j] = exc_erg[i, j] else: exc_ergn[i, j] = 0.0 pot_erg = np.zeros((nx, ny), float) tot_erg = np.zeros((nx, ny), float)
} # Query user for desired number of streams: print "2, 4, 6, 8, or 16 streams are available in the CRTM." n_streams = raw_input("Select the number of streams: ") selector = int(switcher.get(int(n_streams), "Invalid number of streams!")) streams = [2,4,6,8,16] low = selector high = low + int(n_streams) coeff = coeff[low:high] reso = 1800 p = np.zeros(reso) x = np.arange(-1,1,2./reso) for ii in range(0,reso): # Reconstruction of the phase function: p[ii] = np.polynomial.legendre.legval(x[ii],coeff) plt.figure(1) plt.plot(180./np.pi*np.arccos(x),p) np.savetxt("phasefunction.txt",p,newline='\n') plt.xlabel('polar angle [$^{\circ}$ deg]', fontsize=22) plt.ylabel('$P_{11}$', fontsize=22) plt.grid('on') plt.figure(2) plt.semilogy(abs(coeff),'o-') plt.ylabel('$|C_n|$', fontsize=22) plt.xlabel('n', fontsize=22) plt.show()
def get_ang_norm(self):
    """Return the angular norm, i.e. the angular rotation, of this orientation."""
    return 2 * np.arccos(self._s)
def main(): progname = os.path.basename(sys.argv[0]) usage = """prog [options] <crystal image> Orient crystals imaged via electron microscopy. """ parser = EMArgumentParser(usage=usage, version=EMANVERSION) parser.add_argument("--apix", required=True, type=float, default=False, help="Specify the Apix of your input images") parser.add_argument( "--params", required=True, type=str, help= "Lattice parameters separated by commas, i.e. '70.3,70.3,32.0,90.0,90.0,90.0'", default="", guitype='intbox', row=11, col=0, rowspan=1, colspan=1, mode="align") parser.add_argument( "--slab", type=float, help="Specify the thickness of the central slab. Default is 10.0", default=10.0) parser.add_argument( "--mindeg", type=float, help= "Specify the minimum angle for initial orientation search. Default is -180.0", default=-180.0) parser.add_argument( "--maxdeg", type=float, help= "Specify the maximum angle for initial orientation search. Default is 180.0", default=180.0) parser.add_argument( "--diameter", type=float, help="Specify the minimum spot diameter. Default is 5.0", default=5.0) parser.add_argument( "--maxshift", type=float, help= "Specify the maximum pixel shift when optimizing peak locations. Default is 32.0 pixels.", default=32.0) parser.add_argument( "--exper_weight", type=float, help= "Weight of penalty for spots in experimental data not found in the reference lattice. Default is 10.0.", default=10.0) parser.add_argument( "--data_weight", type=float, help= "Weight of penalty for points in reference lattice not found in the experimental data. Default is 1.0.", default=1.0) parser.add_argument( "--plot", default=False, help= "Show plot of reciprocal reference lattice points overlayed on input image and detected reflections.", action="store_true") parser.add_argument( "--threads", type=int, help= "Number of cores over which parallel optimization will be performed. Default is to use 1 core.", default=1) parser.add_argument( "--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID", default=-2) parser.add_argument( "--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, default=0, help= "verbose level [0-9], higner number means higher level of verboseness") (options, args) = parser.parse_args() apix = float(options.apix) close = options.slab try: params = [float(p) for p in options.params.split(",")] a, b, c, alpha, beta, gamma = params except: print( "Could not read lattice parameters. Please specify as a comma separated list containing 'a,b,c,alpha,beta,gamma'." 
) for fn in args: try: print(("READING {}".format(fn))) orig = EMData(fn) except: print(("Could not find {}".format(fn))) sys.exit(1) # PREPROCESSING nx = orig["nx"] orig.process_inplace("normalize.edgemean") #orig.process_inplace("filter.highpass.gauss",{"cutoff_pixels":2}) #orig.process_inplace("filter.lowpass.gauss",{"cutoff_abs":0.34}) nx = min(orig["nx"], orig["ny"]) # clip to min x,y to obtain square image reg = Region(0, 0, nx, nx) orig = orig.get_clip(reg) #orig.process_inplace("filter.highpass.gauss",{"cutoff_freq":0.01}) # remove strong signal at origin for improved peak finding orig.process_inplace("filter.xyaxes0", {"neighbornorm": 2}) #orig.process_inplace("mask.gaussian",{"outer_radius":orig["nx"]/8}) # overly stringent apodization #orig.process_inplace("mask.decayedge2d",{"width":nx/4}) # simple apodization orig.process_inplace("mask.gaussian", { "outer_radius": old_div(orig["nx"], 8), "exponent": 3.0 }) norig = orig.numpy().copy() fnorig = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(norig))) img = orig.process("math.realtofft") # obtain an amplitude image img.process_inplace("normalize") img.process_inplace("filter.lowpass.gauss", {"cutoff_abs": 0.1}) # strong lowpass ra = img.process("math.rotationalaverage") img -= ra img.process_inplace("threshold.belowtozero", {"minval": img["mean"] + 3.0 * img["sigma"]}) #img.process_inplace("threshold.notzero") nimg = img.numpy().copy() print("\nDETECTING SPOTS") # dilation of nimg with a size*size structuring element image_max = ndi.maximum_filter( nimg, size=np.min([a, b, c]).astype(int), mode='constant') # measured size of spot (rough) # 30 # compare image_max and nimg to find the coordinates of local maxima tmp = peak_local_max( nimg, min_distance=np.max([a, b, c]).astype(int) ) # measured minimum distance from origin to first point (rough) # 50 coords = [] for coord in tmp: if np.abs(np.linalg.norm(c - old_div(nimg.shape[0], 2.))) <= ( old_div(nx, 2.)): coords.append(coord) coords = np.asarray(coords) refined_coords = refine_peaks(coords, img, options.maxshift, options.maxshift * 2.) ds = old_div(1, (apix * nx)) # angstroms per fourier pixel exper_max_radius = np.linalg.norm(refined_coords - old_div(nx, 2), axis=1).max() print(("Highest resolution reflection: {:.2f}A ({} pix)\n".format( old_div(1, (ds * exper_max_radius)), int(round(exper_max_radius, 0))))) print("DETERMINING ORIENTATION") resolutions = np.asarray([ 1000., 100., 50., 25., 20., 18., 16., 10., 8., 5., 4., 3., 2.9, 2.8, 2.7, 2.6, 2.5, 2.4, 2.3, 2.2, 2.1, 2.0, 1.9, 1.8 ]) radii = [ float(r) for r in old_div(1, (resolutions * ds)) if r <= old_div(nx, 2.) ] # initial search range a_mindeg = options.mindeg a_maxdeg = options.maxdeg b_mindeg = options.mindeg b_maxdeg = options.maxdeg g_mindeg = options.mindeg g_maxdeg = options.maxdeg old_nrefs = 0.0 count = 0 for r in radii: ang = np.arccos(np.sqrt(old_div( (r**2 - options.diameter), r**2))) * 180. 
/ np.pi #max_radius = r#1/np.sin(ang)**2 #if r > nx/2: continue max_resol = old_div(1, (r * ds)) hkl_exper = np.vstack([refined_coords[:, 1], refined_coords[:, 0]]).T hkl_exper = (hkl_exper - np.mean(hkl_exper, axis=0)) all_exper_radii = np.linalg.norm(hkl_exper, axis=1) hkl_exper = hkl_exper[all_exper_radii <= r] exper_radii = np.linalg.norm(hkl_exper, axis=1) hkl_exper = np.c_[hkl_exper, exper_radii] if len(hkl_exper) < 2: continue if len(hkl_exper) == old_nrefs: continue else: old_nrefs = len(hkl_exper) print(( "\nAngular step: {:.2f} degrees\tRadius: {:.2f} pixels ({:.2f} Angstroms)\t{} Reflections" .format(ang, r, max_resol, len(hkl_exper)))) hkl_ref = generate_lattice(nx, apix, r, a, b, c, alpha, beta, gamma) dist_exper, idx_exper = scipy.spatial.cKDTree( hkl_exper[:, :2]).query([0, 0], k=9) min_distance = np.min( [dist_exper[1], dist_exper[3], dist_exper[5], dist_exper[7]]) if count == 0: rngs = list( itertools.product(np.arange(a_mindeg, a_maxdeg + ang, ang), np.arange(b_mindeg, b_maxdeg + ang, ang), np.arange(g_mindeg, g_maxdeg + ang, ang))) else: rngs = [] for s in solns: print(s) a_mindeg = s[0] - ang a_maxdeg = s[0] + ang b_mindeg = s[1] - ang b_maxdeg = s[1] + ang c_mindeg = s[2] - ang c_maxdeg = s[2] + ang rngs.extend( list( itertools.product( np.arange(a_mindeg, a_maxdeg + ang, ang), np.arange(b_mindeg, b_maxdeg + ang, ang), np.arange(g_mindeg, g_maxdeg + ang, ang)))) count += 1 start = time.time() resq = queue.Queue(0) res = [0] * len(rngs) thds = [] if options.verbose: sys.stdout.write("\rCreating {} threads".format(len(rngs))) for i in range(len(rngs)): if options.verbose and i % 100 == 0: sys.stdout.write("\rCreating {}/{} threads".format( i + 1, len(rngs))) thd = threading.Thread(target=cost_async, args=(rngs[i], hkl_exper, hkl_ref, close, old_div(min_distance, 10.), options.exper_weight, options.data_weight, i, resq)) thds.append(thd) t0 = time.time() if options.verbose: sys.stdout.flush() minval = np.inf thrtolaunch = 0 while thrtolaunch < len(thds) or threading.active_count() > 1: if thrtolaunch < len(thds): while (threading.active_count() == options.threads): time.sleep(.1) if options.verbose and (thrtolaunch % 100 == 0 or len(thds) - thrtolaunch < 5): sys.stdout.write( "\rSearched {}/{} orientations".format( thrtolaunch + 1, len(thds))) thds[thrtolaunch].start() thrtolaunch += 1 else: time.sleep(0.5) while not resq.empty(): i, cx = resq.get() if cx < minval: minval = cx res[i] = cx for th in thds: th.join() solns = [rngs[i] for i, v in enumerate(res) if v == minval] sys.stdout.write("\t\t\tFound {} solutions:".format(len(solns))) print(solns) sys.stdout.flush() print("\n\nREFINING PARAMETERS") best_cost = np.inf scost = best_cost best_orient = None hkl_exper = np.vstack([refined_coords[:, 1], refined_coords[:, 0]]).T hkl_exper = (hkl_exper - np.mean(hkl_exper, axis=0)) all_exper_radii = np.linalg.norm(hkl_exper, axis=1) hkl_exper = hkl_exper[all_exper_radii <= exper_max_radius] exper_radii = np.linalg.norm(hkl_exper, axis=1) hkl_exper = np.c_[hkl_exper, exper_radii] hkl_ref = generate_lattice(nx, apix, exper_max_radius, a, b, c, alpha, beta, gamma) for ii, soln in enumerate(solns): hkl_ref = generate_lattice(nx, apix, exper_max_radius, a, b, c, alpha, beta, gamma) refine1 = optimize.fmin(cost, soln, args=( hkl_exper, hkl_ref, close, old_div(min_distance, 10.0), options.data_weight, options.exper_weight, ), disp=False) az, alt, phi = refine1[0], refine1[1], refine1[2] refine_apix = optimize.fmin(apix_cost, [apix], args=( az, alt, phi, hkl_exper, hkl_ref, close, 16., 
nx, exper_max_radius, options.data_weight, options.exper_weight, a, b, c, alpha, beta, gamma, ), disp=False) refine_apix = float(refine_apix[0]) #float(refine_apix.x) hkl_ref = generate_lattice(nx, refine_apix, exper_max_radius, a, b, c, alpha, beta, gamma) ds = old_div(1, (refine_apix * nx)) # angstroms per fourier pixel refine_close = optimize.fmin(close_cost, [close], args=( az, alt, phi, hkl_exper, hkl_ref, 8., 5.0, options.data_weight, options.exper_weight, ), disp=False) refine_close = refine_close[0] if refine_close <= 1.0: refine_close = close refine2 = optimize.fmin(cost, refine1, args=( hkl_exper, hkl_ref, refine_close, old_div(min_distance, 10.0), options.data_weight, options.exper_weight, ), disp=False) # re-refine orientation scost = cost(refine2, hkl_exper, hkl_ref, refine_close, min_distance, options.data_weight, options.exper_weight) if scost < best_cost: best_cost = scost best_orient = refine2 best_refine_apix = refine_apix best_refine_close = refine_close sys.stdout.flush() ds = old_div(1, (best_refine_apix * nx)) # angstroms per fourier pixel print(("Refined Apix: {:.2f} -> {:.2f}".format(apix, best_refine_apix))) print(("Refined thickness: {:.2f} -> {:.2f}".format( close, best_refine_close))) print(("Refined orientation: ({:.2f},{:.2f},{:.2f})\n".format( *best_orient))) hkl_ref = generate_lattice(nx, refine_apix, exper_max_radius, a, b, c, alpha, beta, gamma) pln = get_plane(best_orient, hkl_ref, close=best_refine_close) pln = pln[np.argsort(pln[:, 3])] # sort by radius if options.plot: plt.imshow(nimg, origin="lower", cmap=plt.cm.Greys_r) plt.scatter(hkl_exper[:, 1] + old_div(nx, 2), hkl_exper[:, 0] + old_div(nx, 2), c='b', marker='x') plt.scatter(pln[:, 1] + old_div(nx, 2), pln[:, 0] + old_div(nx, 2), c='r', marker='x') plt.axis("off") plt.title("(Az, Alt, Phi) -> ({:.2f},{:.2f},{:.2f})".format( *best_orient)) plt.show() print( " xc yc zc r resol h k l raw_F raw_p" ) with open("{}.sf".format(fn.split(".")[0]), "w") as sf: # create a quasi.sf file for this image for nrow, row in enumerate(pln): xc, yc, zc, r, h, k, l = row[:7] if r > 0: resol = old_div(1, (r * ds)) else: resol = "inf" if nrow in range(9): raw_F, raw_p = get_sf(fnorig, int(xc) + old_div(nx, 2), int(yc) + old_div(nx, 2), 2) #,show=True) else: raw_F, raw_p = get_sf(fnorig, int(xc) + old_div(nx, 2), int(yc) + old_div(nx, 2), 2) #,show=False) try: print(( "{:8.1f},{:8.1f},{:8.1f} {:6.1f} {:6.1f} {:4d} {:4d} {:4d} {:8.2f} {:8.2f}" .format(xc, yc, zc, r, resol, int(h), int(k), int(l), raw_F, raw_p))) except: print(( "{:8.1f},{:8.1f},{:8.1f} {:6.1f} inf {:4d} {:4d} {:4d} {:.2f} {:.2f}" .format(xc, yc, zc, r, int(h), int(k), int(l), raw_F, raw_p))) sf.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format( h, k, l, raw_F, raw_p, r, resol))
def perturb(self, params): """ Unlike in C++, this takes a numpy array of parameters as input, and modifies it in-place. The return value is still logH. """ logH = 0.0 reps = 1; if(rng.rand() < 0.5): reps += np.int(np.power(100.0, rng.rand())); # print "going to perturb %d reps" % reps for i in range(reps): # print " rep iteration %d" % i which = rng.randint(len(params)) if which == 0: rad_idx = 0 theta_idx = 2 theta = params[theta_idx] #FIND THE MAXIMUM RADIUS STILL INSIDE THE DETECTOR theta_eq = np.arctan(detector.detector_length/detector.detector_radius) theta_taper = np.arctan(detector.taper_length/detector.detector_radius) # print "theta: %f pi" % (theta / np.pi) if theta <= theta_taper: z = np.tan(theta)*(detector.detector_radius - detector.taper_length) / (1-np.tan(theta)) max_rad = z / np.sin(theta) elif theta <= theta_eq: max_rad = detector.detector_radius / np.cos(theta) # print "max rad radius: %f" % max_rad else: theta_comp = np.pi/2 - theta max_rad = detector.detector_length / np.cos(theta_comp) # print "max rad length: %f" % max_rad #AND THE MINIMUM (from PC dimple) #min_rad = 1./ ( np.cos(theta)**2/detector.pcRad**2 + np.sin(theta)**2/detector.pcLen**2 ) min_rad = np.amax([detector.pcRad, detector.pcLen]) total_max_rad = np.sqrt(detector.detector_length**2 + detector.detector_radius**2 ) params[which] += total_max_rad*dnest4.randh() params[which] = dnest4.wrap(params[which] , min_rad, max_rad) elif which ==2: #theta rad_idx = 0 rad = params[rad_idx] # print "rad: %f" % rad if rad < np.amin([detector.detector_radius - detector.taper_length, detector.detector_length]): max_val = np.pi/2 min_val = 0 # print "theta: min %f pi, max %f pi" % (min_val, max_val) else: if rad < detector.detector_radius - detector.taper_length: #can't possibly hit the taper # print "less than taper adjustment" min_val = 0 elif rad < np.sqrt(detector.detector_radius**2 + detector.taper_length**2): #low enough that it could hit the taper region # print "taper adjustment" a = detector.detector_radius - detector.taper_length z = 0.5 * (np.sqrt(2*rad**2-a**2) - a) min_val = np.arcsin(z/rad) else: #longer than could hit the taper # print " longer thantaper adjustment" min_val = np.arccos(detector.detector_radius/rad) if rad < detector.detector_length: max_val = np.pi/2 else: max_val = np.pi/2 - np.arccos(detector.detector_length/rad) # print "theta: min %f pi, max %f pi" % (min_val, max_val) params[which] += np.pi/2*dnest4.randh() params[which] = dnest4.wrap(params[which], min_val, max_val) # if which == 0: # params[which] += (detector.detector_radius)*dnest4.randh() # params[which] = dnest4.wrap(params[which] , 0, detector.detector_radius) elif which == 1: max_val = np.pi/4 params[which] += np.pi/4*dnest4.randh() params[which] = dnest4.wrap(params[which], 0, max_val) if params[which] < 0 or params[which] > np.pi/4: print "wtf phi" #params[which] = np.clip(params[which], 0, max_val) # elif which == 2: # params[which] += (detector.detector_length)*dnest4.randh() # params[which] = dnest4.wrap(params[which] , 0, detector.detector_length) elif which == 3: #scale min_scale = wf.wfMax - 0.01*wf.wfMax max_scale = wf.wfMax + 0.005*wf.wfMax params[which] += (max_scale-min_scale)*dnest4.randh() params[which] = dnest4.wrap(params[which], min_scale, max_scale) # print " adjusted scale to %f" % ( params[which]) elif which == 4: #t0 params[which] += 1*dnest4.randh() params[which] = dnest4.wrap(params[which], min_maxt, max_maxt) elif which == 5: #smooth params[which] += 0.1*dnest4.randh() params[which] = 
dnest4.wrap(params[which], 0, 25) # print " adjusted smooth to %f" % ( params[which]) # elif which == 6: #wf baseline slope (m) # logH -= -0.5*(params[which]/1E-4)**2 # params[which] += 1E-4*dnest4.randh() # logH += -0.5*(params[which]/1E-4)**2 # elif which == 7: #wf baseline incercept (b) # logH -= -0.5*(params[which]/1E-2)**2 # params[which] += 1E-2*dnest4.randh() # logH += -0.5*(params[which]/1E-2)**2 # params[which] += 0.01*dnest4.randh() # params[which]=dnest4.wrap(params[which], -1, 1) # print " adjusted b to %f" % ( params[which]) else: #velocity or rc params: cant be below 0, can be arb. large print "which value %d not supported" % which exit(0) return logH
return (-np.sort(-input, axis=-1)[..., :k], (n - (np.argsort( input[..., ::-1], kind='stable', axis=-1)[..., ::-1]))[..., :k]) # --- Begin Public Functions -------------------------------------------------- abs = utils.copy_docstring( # pylint: disable=redefined-builtin tf.math.abs, lambda x, name=None: np.abs(x)) accumulate_n = utils.copy_docstring( tf.math.accumulate_n, lambda inputs, shape=None, tensor_dtype=None, name=None: ( # pylint: disable=g-long-lambda sum(map(np.array, inputs)).astype(utils.numpy_dtype(tensor_dtype)))) acos = utils.copy_docstring(tf.math.acos, lambda x, name=None: np.arccos(x)) acosh = utils.copy_docstring(tf.math.acosh, lambda x, name=None: np.arccosh(x)) add = utils.copy_docstring(tf.math.add, lambda x, y, name=None: np.add(x, y)) add_n = utils.copy_docstring( tf.math.add_n, lambda inputs, name=None: sum(map(np.array, inputs))) angle = utils.copy_docstring(tf.math.angle, lambda input, name=None: np.angle(input)) argmax = utils.copy_docstring( tf.math.argmax, lambda input, axis=None, output_type=tf.int64, name=None: ( # pylint: disable=g-long-lambda np.argmax(input, axis=0 if axis is None else _astuple(axis)).astype(
def three_point_correlation_function(coord0, coord1, coord2, nbins=(100, 100, 100), histrange=[(0, 200), (0, 200), (0, 200)], triplet_to_calculate="DDD"): # For debugging #hist = np.histogram(coord0.transpose()[0], bins=nbins, range=histrange) #hist_tot = hist[0] #bin_edges = hist[1] #return hist_tot,bin_edges # Assume the data is coming in ngals x 3 arrays # x, y, z (all in Mpc) # For example, # [ [ 1253.0, 2384.4, 3425.24], # [ 1987.5, 2564.7, 2439.42], # ......................... ] #print("Ngals: %d %d" % (len(coord0), len(coord1))) nc0 = len(coord0) nc1 = len(coord1) nc2 = len(coord2) print("sizes: ", nc0, nc1, nc2) hist_tot = np.zeros(nbins, dtype=int) bin_edges = None triangles = [] for i in range(nc0): c0 = coord0[i] if i % 10 == 0: print("Outermost loop {0} of {1}".format(i, nc0)) lo1 = 0 if triplet_to_calculate=="DDD" or \ triplet_to_calculate=="DDR" or \ triplet_to_calculate=="RRR": lo1 = i + 1 for j in range(lo1, nc1): c1 = coord1[j] lo2 = 0 if triplet_to_calculate=="DDD" or \ triplet_to_calculate=="DRR" or \ triplet_to_calculate=="RRR": lo2 = j + 1 for k in range(lo2, nc2): c2 = coord2[k] #print(i,j,k) #print(c0,c1,c2) r01 = distance(c0, c1) r02 = distance(c0, c2) r12 = distance(c1, c2) # From Fig. 6 # https://arxiv.org/pdf/astro-ph/0403638.pdf sides = np.sort([r01, r02, r12]) r01 = sides[0] r02 = sides[1] r12 = sides[2] #print("sides") #print(sides) s = r01 q = r02 / r01 theta = np.arccos( (r01 * r01 + r02 * r02 - r12 * r12) / (2 * r01 * r02)) theta /= PI #print([s,q,theta]) triangles.append([s, q, theta]) triangles = np.array(triangles) print(len(triangles)) H, edges = np.histogramdd(triangles, bins=nbins, range=histrange) return H, edges
def d2z(d):
    phi = np.arccos(d2z_p_2 - d2z_p_3 * h_0 * d / 1.0e6)
    return d2z_p_0 * np.cos((phi + 4.0 * np.pi) / 3.0) + d2z_p_1
F = np.array(fList) N = pList Ttotal = np.sum(T, 0) Ttotal[0] = Ttotal[2] = 0 Ftotal = np.sum(F, 0) Ftotal[2] -= G Ftotal[1] = 0 print(beta) if(i == 0.06) : plt.scatter(N[:, 0], N[:, 1]) plt.scatter(N[4, 0], N[4, 1]) plt.grid() plt.show() sita = np.arccos(n[2]) print(sita * 180 / np.pi)
def SO3LogUp(rotation):
    psi = np.arccos((np.trace(rotation) - 1) / 2)
    return psi * np.array(rotation - rotation.T) / (2 * np.sin(psi))
def angular_distance(coord0, coord1, nbins=100, histrange=(0, 200), log_scale=False, same_coords=False, verbose=False): # Data is coming in as [ [xxx, xxx, xxx], ...] # and is in degrees coord0_T = coord0.transpose() ra0 = np.deg2rad(coord0_T[0]) dec0 = np.deg2rad(coord0_T[1]) coord1_T = coord1.transpose() ra1 = np.deg2rad(coord1_T[0]) dec1 = np.deg2rad(coord1_T[1]) cosdec0 = np.cos(dec0) cosdec1 = np.cos(dec1) sindec0 = np.sin(dec0) sindec1 = np.sin(dec1) #hist_bins_log = [] #hist_bins_log += np.linspace(0.001,0.009,9).tolist() #hist_bins_log += np.linspace(0.01,0.09,9).tolist() #hist_bins_log += np.linspace(0.1,0.9,9).tolist() #hist_bins_log += np.linspace(1.,10,10).tolist() hist_bins_log = np.logspace(np.log10(0.005), np.log10(10.0), 31) if log_scale == True: nbins = len(hist_bins_log) - 1 #nbins = len(hist_bins_log) hist_tot = np.zeros(nbins, dtype=int) bin_edges = None #print("HEREREERE") #print(hist_bins_log) #exit() ngals0 = len(ra0) for i in range(0, ngals0): #for i,(s,c,r) in enumerate(zip(sindec0,cosdec0,ra0)): #print(i,s,c,r) s = sindec0[i] c = cosdec0[i] r = ra0[i] if verbose: if i % 1000 == 0: print(i) cos_ang_dist = None if same_coords == False: cos_ang_dist = s * sindec1 + c * cosdec1 * np.cos(r - ra1) else: cos_ang_dist = s * sindec1[i + 1:] + c * cosdec1[i + 1:] * np.cos( r - ra1[i + 1:]) ang_dist = np.rad2deg(np.arccos(cos_ang_dist)) # Convert to degrees if log_scale == False: hist = np.histogram(ang_dist, bins=nbins, range=histrange) else: hist = np.histogram(ang_dist, bins=hist_bins_log) hist_tot += hist[0] bin_edges = hist[1] return hist_tot, bin_edges
def compute_angle(phi1, phi2):
    dot = np.sum(phi1*phi2) / (np.linalg.norm(phi1) * np.linalg.norm(phi2))
    angle = np.arccos(dot) * 180 / np.pi
    return 0 if np.isnan(angle) else angle
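# Usage sketch for compute_angle above (not part of the original code; assumes
# compute_angle and numpy as np are in scope): perpendicular vectors should
# give 90 degrees; identical vectors give (approximately) 0, and the isnan
# guard maps any roundoff-induced undefined case to 0 as well.
assert np.isclose(compute_angle(np.array([1.0, 0.0]), np.array([0.0, 1.0])), 90.0)
assert np.isclose(compute_angle(np.array([1.0, 2.0]), np.array([1.0, 2.0])), 0.0, atol=1e-5)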
def SO3Log(rotation):
    psi = np.arccos((np.trace(rotation) - 1) / 2)
    return psi * np.array([rotation[2, 1] - rotation[1, 2],
                           rotation[0, 2] - rotation[2, 0],
                           rotation[1, 0] - rotation[0, 1]]) / (2 * np.sin(psi))
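# Usage sketch for SO3Log above (not part of the original code; assumes SO3Log
# and numpy as np are in scope): the log of a rotation by theta about the
# z-axis should be the axis-angle vector [0, 0, theta]. Note the 1/(2*sin(psi))
# factor is ill-conditioned as psi approaches 0 or pi.
theta = 0.7
c, s = np.cos(theta), np.sin(theta)
Rz = np.array([[c, -s, 0.0],
               [s,  c, 0.0],
               [0.0, 0.0, 1.0]])
assert np.allclose(SO3Log(Rz), [0.0, 0.0, theta])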
def evaluatePlanes(planes, filename=None, depths=None, normals=None, invalidMask=None,
                   outputFolder=None, outputIndex=0, colorMap=None):
    if filename is not None:
        if 'mlt' not in filename:
            filename = filename.replace('color', 'mlt')
        normalFilename = filename.replace('mlt', 'norm_camera')
        normals = np.array(PIL.Image.open(normalFilename)).astype(np.float64) / 255 * 2 - 1
        norm = np.linalg.norm(normals, 2, 2)
        for c in range(3):
            normals[:, :, c] /= norm
        depthFilename = filename.replace('mlt', 'depth')
        depths = np.array(PIL.Image.open(depthFilename)).astype(np.float64) / 1000
        # if len(depths.shape) == 3:
        #     depths = depths.mean(2)
        maskFilename = filename.replace('mlt', 'valid')
        invalidMask = np.array(PIL.Image.open(maskFilename))
        invalidMask = invalidMask < 128
        invalidMask += depths > 10

    height = normals.shape[0]
    width = normals.shape[1]
    focalLength = 517.97
    urange = np.arange(width).reshape(1, -1).repeat(height, 0) - width * 0.5
    vrange = np.arange(height).reshape(-1, 1).repeat(width, 1) - height * 0.5
    ranges = np.array([urange / focalLength,
                       np.ones(urange.shape),
                       -vrange / focalLength]).transpose([1, 2, 0])

    X = depths / focalLength * urange
    Y = depths
    Z = -depths / focalLength * vrange
    d = -(normals[:, :, 0] * X + normals[:, :, 1] * Y + normals[:, :, 2] * Z)

    normalDotThreshold = np.cos(np.deg2rad(30))
    distanceThreshold = 50000

    reconstructedNormals = np.zeros(normals.shape)
    reconstructedDepths = np.zeros(depths.shape)
    segmentationImage = np.zeros((height, width, 3))
    distanceMap = np.ones((height, width)) * distanceThreshold
    occupancyMask = np.zeros((height, width)).astype(bool)
    segmentationTest = np.zeros((height, width))
    y = 297
    x = 540
    for planeIndex, plane in enumerate(planes):
        planeD = np.linalg.norm(plane)
        planeNormal = -plane / planeD

        normalXYZ = np.dot(ranges, planeNormal)
        normalXYZ = np.reciprocal(normalXYZ)
        planeY = -normalXYZ * planeD

        distance = np.abs(planeNormal[0] * X + planeNormal[1] * Y + planeNormal[2] * Z
                          + planeD) / np.abs(np.dot(normals, planeNormal))
        # distance = np.abs(planeY - depths)

        mask = (distance < distanceMap) * (planeY > 0) * \
            (np.abs(np.dot(normals, planeNormal)) > normalDotThreshold) * \
            (np.abs(planeY - depths) < 0.5)
        occupancyMask += mask

        reconstructedNormals[mask] = planeNormal
        # if planeNormal[2] > 0.9:
        #     print(planeD)
        #     print(planeNormal)
        #     minDepth = depths.min()
        #     maxDepth = depths.max()
        #     print(depths[300][300])
        #     print(planeY[300][300])
        #     print(depths[350][350])
        #     print(planeY[350][350])
        #     PIL.Image.fromarray((np.maximum(np.minimum((planeY - minDepth) / (maxDepth - minDepth), 1), 0) * 255).astype(np.uint8)).save(outputFolder + '/plane.png')
        #     exit(1)
        reconstructedDepths[mask] = planeY[mask]

        if colorMap is not None and planeIndex in colorMap:
            segmentationImage[mask] = colorMap[planeIndex]
        else:
            segmentationImage[mask] = np.random.randint(255, size=(3,))

        distanceMap[mask] = distance[mask]
        segmentationTest[mask] = planeIndex + 1
        # print((planeIndex, planeY[y][x], distance[y][x], np.abs(np.dot(normals, planeNormal))[y][x]))

    # print(distanceMap.mean())
    # print(distanceMap.max())
    # print(np.abs(reconstructedDepths - depths)[occupancyMask].max())
    # print(pow(reconstructedDepths - depths, 2)[np.invert(invalidMask)].mean())
    # exit(1)
    # planeIndex = segmentationTest[y][x]
    # print(normals[y][x])
    # plane = planes[int(planeIndex)]
    # planeD = np.linalg.norm(plane)
    # planeNormal = -plane / planeD
    # print((planeNormal, planeD))
    # print(depths[y][x])
    # print(reconstructedDepths[y][x])
    # print(segmentationTest[y][x])

    if outputFolder is not None:
        depths[invalidMask] = 0
        normals[invalidMask] = 0
        reconstructedDepths[invalidMask] = 0
        reconstructedNormals[invalidMask] = 0

        minDepth = depths.min()
        maxDepth = depths.max()
        PIL.Image.fromarray(((depths - minDepth) / (maxDepth - minDepth) * 255).astype(
            np.uint8)).save(outputFolder + '/' + str(outputIndex) + '_depth.png')
        PIL.Image.fromarray((np.maximum(np.minimum(
            (reconstructedDepths - minDepth) / (maxDepth - minDepth), 1), 0) * 255).astype(
            np.uint8)).save(outputFolder + '/' + str(outputIndex) + '_depth_reconstructed.png')
        # PIL.Image.fromarray((np.maximum(np.minimum((reconstructedDepths - depths) / distanceThreshold, 1), 0) * 255).astype(np.uint8)).save(outputFolder + '/depth_' + str(outputIndex) + '_diff.png')
        PIL.Image.fromarray(((normals + 1) / 2 * 255).astype(
            np.uint8)).save(outputFolder + '/' + str(outputIndex) + '_normal_.png')
        PIL.Image.fromarray(((reconstructedNormals + 1) / 2 * 255).astype(
            np.uint8)).save(outputFolder + '/' + str(outputIndex) + '_normal_reconstructed.png')
        PIL.Image.fromarray(segmentationImage.astype(
            np.uint8)).save(outputFolder + '/' + str(outputIndex) + '_plane_segmentation.png')
        # depthImage = ((depths - minDepth) / (maxDepth - minDepth) * 255).astype(np.uint8)
        # PIL.Image.fromarray((invalidMask * 255).astype(np.uint8)).save(outputFolder + '/mask.png')
    else:
        occupancy = (occupancyMask > 0.5).astype(np.float32).sum() / np.invert(invalidMask).sum()
        invalidMask += np.invert(occupancyMask)
        # PIL.Image.fromarray(invalidMask.astype(np.uint8) * 255).save(outputFolder + '/mask.png')
        reconstructedDepths = np.maximum(np.minimum(reconstructedDepths, 10), 0)
        depthError = pow(reconstructedDepths - depths, 2)[np.invert(invalidMask)].mean()
        # depthError = distanceMap.mean()
        normalError = np.arccos(np.maximum(np.minimum(
            np.sum(reconstructedNormals * normals, 2), 1), -1))[np.invert(invalidMask)].mean()
        # normalError = pow(np.linalg.norm(reconstructedNormals - normals, 2, 2), 2)[np.invert(invalidMask)].mean()
        # print((depthError, normalError, occupancy))
        # print(depths.max())
        # print(depths.min())
        # print(reconstructedDepths.max())
        # print(reconstructedDepths.min())
        # print(occupancy)
        # exit(1)
        # reconstructedDepths[np.invert(occupancyMask)] = depths[np.invert(occupancyMask)]
        return depthError, normalError, occupancy, segmentationTest, reconstructedDepths, occupancyMask
    return
R_p = D_p / 2              # [m]. Propeller disk radius
S_p = np.pi * R_p**2       # [m^2]. Propeller disk area
T_cruise = 10              # [N]. Propeller thrust during cruise
T_VTOL = 10                # [N]. Propeller thrust during VTOL
N_p = 4                    # [-]. Number of propellers
omega_cruise = 60          # [rad/s]. Propeller angular velocity during cruise
omega_VTOL = 60            # [rad/s]. Propeller angular velocity during VTOL

# Wing discretization
N = 1000                   # [-]. Number of spanwise wing stations
K = 100                    # [-]. Number of Fourier modes used. N > K for a solvable system
alpha_fly = 2              # [deg]. Geometric angle of attack
dy = b / N                 # [m]. Width of panels
y = np.linspace(-b / 2 + dy / 2, b / 2 - dy / 2, N)  # [m]. Control point locations on mid-panel
theta = np.arccos(-2 * y / b)                         # [-]. Coordinate transformation

# Defining and computing the axial velocity induced by the propeller.
def v_axial_propeller(V_0, T, rho, S_p):
    # Equation A.29, PhD thesis of Veldhuis
    v_a = 0.5 * (-V_0 + np.sqrt(V_0**2 + 2 * T / (rho * S_p)))
    return v_a

v_a_cruise = v_axial_propeller(V_cruise, T_cruise, rho_cruise, S_p)
v_a_stall = v_axial_propeller(V_stall, T_VTOL, rho_stall, S_p)
v_a_VTOL = v_axial_propeller(V_VTOL, T_VTOL, rho_VTOL, S_p)

# Defining and computing the radial/swirl velocity induced by the propellers
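# Illustrative check of the actuator-disk (momentum theory) result used in
# v_axial_propeller above. The helper name and values below are assumptions for
# demonstration only, not the document's actual cruise condition: the induced
# axial velocity should satisfy T = 2 * rho * S_p * (V_0 + v_a) * v_a.
def _check_axial_velocity(V_0=40.0, T=10.0, rho=1.225, S_p=np.pi * 0.25**2):
    v_a = 0.5 * (-V_0 + np.sqrt(V_0**2 + 2 * T / (rho * S_p)))
    thrust_back = 2 * rho * S_p * (V_0 + v_a) * v_a  # should reproduce T
    return v_a, thrust_back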
x2Sum = x2Sum + (x * x)
y2Sum = y2Sum + (y * y)
xySum = xySum + (x * y)
volume = volume + area

radius = math.sqrt(area / pi)
if area > 0:
    xCom = xSum / area
    yCom = ySum / area
    x2Com = x2Sum / area
    y2Com = y2Sum / area
    xyCom = xySum / area

# Second central moments (covariance matrix of the region)
arr = numpy.array([[x2Com - (xCom * xCom), xyCom - (xCom * yCom)],
                   [xyCom - (xCom * yCom), y2Com - (yCom * yCom)]])
vals, vecs = scipy.linalg.eigh(arr)
# print(vecs.shape)
# print(vecs)
# print(vecs[0, 1])
semimajor = math.sqrt(abs(vals[0])) * 2.0
semiminor = math.sqrt(abs(vals[1])) * 2.0
orientation = numpy.arccos(vecs[0, 1]) * (180 / pi)

file.write('%i, %i, %i, %.3f, %i, %i, %.3f, %.3f, %.2f' %
           (slices, area, volume, radius, xCom, yCom, semimajor, semiminor, orientation))
file.write('\n')
file.close()
print(slices, area, volume, radius, xCom, yCom, semimajor, semiminor, orientation)

# TODO
# im.getbox()
# Multiprocessing
def angleToVector(self, vector):
    v0 = numpy.array(self._data, dtype=numpy.float64, copy=False)
    v1 = numpy.array(vector.getData(), dtype=numpy.float64, copy=False)
    dot = numpy.sum(v0 * v1)
    dot /= self._normalizeVector(v0) * self._normalizeVector(v1)
    return numpy.arccos(numpy.fabs(dot))
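# A minimal standalone sketch of the same idea as angleToVector (a hypothetical
# free function, not part of the class above): taking fabs of the cosine folds
# the result into [0, pi/2], i.e. it returns the angle between undirected lines
# rather than between directed vectors.
import numpy

def undirected_angle(a, b):
    a = numpy.asarray(a, dtype=numpy.float64)
    b = numpy.asarray(b, dtype=numpy.float64)
    cosine = numpy.dot(a, b) / (numpy.linalg.norm(a) * numpy.linalg.norm(b))
    return numpy.arccos(numpy.fabs(numpy.clip(cosine, -1.0, 1.0)))

# undirected_angle([1, 0, 0], [-1, 0, 0]) -> 0.0, not pi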
def calc_ik(self, tcp_pose, current_joint_pos):
    a1 = self.link_param[0]
    a2 = self.link_param[1]
    b = self.link_param[2]
    c1 = self.link_param[3]
    c2 = self.link_param[4]
    c3 = self.link_param[5]
    c4 = self.link_param[6]

    # Also converts the pose of the grasped workpiece from the workpiece frame
    # to the robot frame.
    wrist_pos, wrist_orn_m = self._calc_wrist_pose(tcp_pose=tcp_pose)

    # A.
    c0 = np.array(wrist_pos)
    nx1 = np.sqrt(c0[0] ** 2 + c0[1] ** 2) - a1
    s1_sq = nx1 ** 2 + (c0[2] - c1) ** 2
    s2_sq = (nx1 + 2 * a1) ** 2 + (c0[2] - c1) ** 2
    k_sq = a2 ** 2 + c3 ** 2

    # B.1 (J1)
    q1_ = []
    q1_.append(np.arctan2(c0[1], c0[0]))
    if q1_[0] < 0:
        q1_.append(np.arctan2(c0[1], c0[0]) + np.pi)
    else:
        q1_.append(np.arctan2(c0[1], c0[0]) - np.pi)
    q1_idx = self.getNearestValue(q1_, current_joint_pos[0])
    q1 = q1_[q1_idx]

    # B.2 (J2)
    q2_ = []
    if q1_idx == 0:
        tmp = (s1_sq + c2 ** 2 - k_sq) / (2 * np.sqrt(s1_sq) * c2)
        if abs(tmp) > 1:
            return current_joint_pos, False
        q2_.append(-np.arccos(tmp) + np.arctan2(nx1, c0[2] - c1) - np.pi / 2)
        q2_.append(np.arccos(tmp) + np.arctan2(nx1, c0[2] - c1) - np.pi / 2)
    else:
        tmp = (s2_sq + c2 ** 2 - k_sq) / (2 * np.sqrt(s2_sq) * c2)
        if abs(tmp) > 1:
            return current_joint_pos, False
        q2_.append(-np.arccos(tmp) - np.arctan2(nx1 + 2 * a1, c0[2] - c1) - np.pi / 2)
        q2_.append(np.arccos(tmp) - np.arctan2(nx1 + 2 * a1, c0[2] - c1) - np.pi / 2)
    q2_idx = self.getNearestValue(q2_, current_joint_pos[1])
    q2 = q2_[q2_idx]

    # B.3 (J3)
    q3_ = []
    if q1_idx == 0:
        tmp = (s1_sq - c2 ** 2 - k_sq) / (2 * c2 * np.sqrt(k_sq))
    else:
        tmp = (s2_sq - c2 ** 2 - k_sq) / (2 * c2 * np.sqrt(k_sq))
    if abs(tmp) > 1:
        return current_joint_pos, False
    q3_.append(np.arccos(tmp) - np.arctan2(a2, c3) + np.pi / 2)
    q3_.append(-np.arccos(tmp) - np.arctan2(a2, c3) + np.pi / 2)
    q3 = q3_[q2_idx]

    # C.
    e11 = wrist_orn_m[0][0]
    e12 = wrist_orn_m[0][1]
    e13 = wrist_orn_m[0][2]
    e21 = wrist_orn_m[1][0]
    e22 = wrist_orn_m[1][1]
    e23 = wrist_orn_m[1][2]
    e31 = wrist_orn_m[2][0]
    e32 = wrist_orn_m[2][1]
    e33 = wrist_orn_m[2][2]
    s1p = np.sin(q1)
    s23p = np.sin(q2 + q3)
    c1p = np.cos(q1)
    c23p = np.cos(q2 + q3)
    mp = e13 * s23p * c1p + e23 * s23p * s1p + e33 * c23p

    # D.1 (J4)
    q4_p = np.arctan2(e23 * c1p - e13 * s1p,
                      e13 * c23p * c1p + e23 * c23p * s1p - e33 * s23p)
    if abs(current_joint_pos[3] - q4_p) > np.pi * 1.9:
        if q4_p > 0:
            q4_p = -np.pi * 2.0 + q4_p
        else:
            q4_p = np.pi * 2.0 + q4_p
    if q4_p > 0:
        q4_q = q4_p - np.pi
    else:
        q4_q = q4_p + np.pi

    # D.2 (J5)
    q5_p = np.arctan2(np.sqrt(1 - mp ** 2), mp)
    q5_q = -q5_p

    # D.3 (J6)
    q6_p = np.arctan2(e12 * s23p * c1p + e22 * s23p * s1p + e32 * c23p,
                      -e11 * s23p * c1p - e21 * s23p * s1p - e31 * c23p)
    if abs(current_joint_pos[5] - q6_p) > np.pi * 1.9:
        if q6_p > 0:
            q6_p = -np.pi * 2.0 + q6_p
        else:
            q6_p = np.pi * 2.0 + q6_p
    if q6_p > 0:
        q6_q = q6_p - np.pi
    else:
        q6_q = q6_p + np.pi

    # E. Pick the wrist branch whose J4/J6 stay closest to the current joints.
    if abs(current_joint_pos[3] - q4_p) + abs(current_joint_pos[5] - q6_p) <= \
            abs(current_joint_pos[3] - q4_q) + abs(current_joint_pos[5] - q6_q):
        q4 = q4_p
        q5 = q5_p
        q6 = q6_p
    else:
        q4 = q4_q
        q5 = q5_q
        q6 = q6_q

    if abs(q4) > np.pi * (190.0 / 180.0):
        return current_joint_pos, False

    return [q1, q2, q3, q4, q5, q6], True
def angle_between_vectors(v1, v2):
    """Compute the angle (in rad) between the two vectors v1 and v2."""
    v1_u = v1 / np.linalg.norm(v1)
    v2_u = v2 / np.linalg.norm(v2)
    return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
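# Quick usage sketch for angle_between_vectors (the example vectors are
# illustrative assumptions). The np.clip guards against floating-point
# round-off: for (anti)parallel inputs the dot product of the unit vectors can
# land slightly outside [-1, 1], where a bare arccos would return nan.
import numpy as np

print(angle_between_vectors(np.array([1.0, 0.0]), np.array([0.0, 1.0])))    # ~pi/2
print(angle_between_vectors(np.array([1.0, 2.0]), np.array([-2.0, -4.0])))  # ~pi (antiparallel)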
def specific_atom_centric(structure, atom1, atom2, atom3, radius):
    point = args.point
    if point is not None:
        site_search = struct.get_sites_in_sphere(point, include_image=True, r=args.radius)
        sites_init = [
            i[0].coords for i in site_search
            if i[0].as_dict()['species'][0]['element'] == atom1
        ]
        sites_init_frac = [
            i[0].frac_coords for i in site_search
            if i[0].as_dict()['species'][0]['element'] == atom1
        ]
    else:
        sites_init = [
            np.array(x['xyz']) for x in struct.as_dict()['sites']
            if x['species'][0]['element'] == atom1
        ]
        sites_init_frac = [
            np.array(x['abc']) for x in struct.as_dict()['sites']
            if x['species'][0]['element'] == atom1
        ]

    total_data = []
    for i, site in enumerate(sites_init):

        def second_site_search(site1, new_radius):
            second_sites = struct.get_sites_in_sphere(site1, include_image=True, r=new_radius)
            new_second_sites = []
            new_second_sites_frac = []
            for x in second_sites:
                if x[0].as_dict()['species'][0]['element'] == atom2:
                    if not np.array_equal(x[0].coords, site1):
                        new_second_sites.append(x[0].coords)
                        new_second_sites_frac.append(x[0].frac_coords)
            return new_second_sites, new_second_sites_frac

        new_second_sites, new_second_sites_frac = second_site_search(site, radius)
        if new_second_sites == []:
            for new_radius in np.linspace(float(radius + 0.1), radius + 5, 100):
                new_second_sites, new_second_sites_frac = second_site_search(site, new_radius)
                if not new_second_sites == []:
                    break

        for j, second_site in enumerate(new_second_sites):

            def third_site_search(site1, site2, new_radius):
                third_sites = struct.get_sites_in_sphere(site2, include_image=True, r=new_radius)
                new_third_sites = []
                new_third_sites_frac = []
                for x in third_sites:
                    if x[0].as_dict()['species'][0]['element'] == atom3:
                        if not np.array_equal(x[0].coords, site1):
                            if not np.array_equal(x[0].coords, site2):
                                new_third_sites.append(x[0].coords)
                                new_third_sites_frac.append(x[0].frac_coords)
                return new_third_sites, new_third_sites_frac

            new_third_sites, new_third_sites_frac = third_site_search(site, second_site, radius)
            if new_third_sites == []:
                for new_radius in np.linspace(float(radius + 0.1), radius + 5, 100):
                    new_third_sites, new_third_sites_frac = third_site_search(site, second_site, new_radius)
                    if not new_third_sites == []:
                        break

            for k, third_site in enumerate(new_third_sites):
                ba = site - second_site
                bc = third_site - second_site
                cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
                angle = np.around(np.degrees(np.arccos(cosine_angle)), decimals=args.decimal)
                total_data.append([
                    sites_init_frac[i], new_second_sites_frac[j],
                    new_third_sites_frac[k], angle
                ])

    np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
    df = pd.DataFrame(total_data, columns=[atom1, atom2, atom3, 'angle'])
    df = df.sort_values(by=['angle'])
    df = df.reset_index(drop=True)
    if not args.verbose:
        df = df.drop_duplicates(subset='angle', keep='first')
        df = df.reset_index(drop=True)
        return df
    else:
        return df
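# Standalone sketch of the angle calculation used in the innermost loop of
# specific_atom_centric, independent of pymatgen (the coordinates below are
# made-up assumptions): the angle at the central atom2 site is the arccos of
# the normalized dot product of the two bond vectors pointing away from it.
import numpy as np

def bond_angle_deg(site_a, site_center, site_c):
    ba = np.asarray(site_a) - np.asarray(site_center)
    bc = np.asarray(site_c) - np.asarray(site_center)
    cosine = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
    return np.degrees(np.arccos(np.clip(cosine, -1.0, 1.0)))

# e.g. a water-like geometry: bond_angle_deg([0.96, 0, 0], [0, 0, 0], [-0.24, 0.93, 0]) ~ 104.5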
def get_fluxmap(self, eners, local_coords, resolution):
    if resolution is None:
        resolution = 30
    else:
        # Round up to a multiple of 3 so the cut/disk/cut angular bands split evenly.
        resolution = int(N.ceil(resolution / 3.) * 3)

    flux = N.zeros(resolution**2)
    if len(eners) == 0:
        return flux

    local_rads = N.sqrt(N.sum(local_coords[:2]**2, axis=0))
    local_angs = N.arctan2(local_coords[1], local_coords[0])
    local_angs[local_angs < 0.] = local_angs[local_angs < 0.] + 2. * N.pi

    dangcut = N.arccos(self.x_cut / self._Re)

    if dangcut < (N.pi / 2.):
        angcut1 = N.arange(0., dangcut, dangcut / (resolution // 3))
        angdisk = N.arange(dangcut, 2. * N.pi - dangcut,
                           2. * (N.pi - dangcut) / (resolution // 3))
        angcut2 = N.linspace(2. * N.pi - dangcut, 2. * N.pi, resolution // 3 + 1)
        angs = N.hstack((angcut1, angdisk, angcut2))

        rs = N.linspace(0., self._Re, resolution + 1)
        xs = N.linspace(0., self.x_cut, resolution + 1)

        # Treat the disk region and the cut region differently.
        # Disk region: bin according to radius and angle.
        disk = angs[resolution // 3:2 * resolution // 3 + 1]
        enersdisk = N.histogram2d(local_rads, local_angs, bins=[rs, disk], weights=eners)[0]
        drs = N.tile(rs[1:] - rs[:-1], (len(disk) - 1, 1)).T
        ravgs = N.tile((rs[1:] + rs[:-1]) / 2., (len(disk) - 1, 1)).T
        dangs = N.tile(N.abs(disk[1:] - disk[:-1]), (len(rs) - 1, 1))
        areas = drs * ravgs * dangs
        fluxdisk = N.hstack(enersdisk / areas)

        # Cut region: bin according to x coordinate and angle.
        cut1 = angs <= dangcut
        enerscut1 = N.histogram2d(local_coords[0], local_angs,
                                  bins=[xs, angs[cut1]], weights=eners)[0]
        dxs = N.tile(xs[1:] - xs[:-1], (len(angs[cut1]) - 1, 1))
        dys = (xs[:-1] * N.vstack(N.tan(angs[cut1][:-1])) +
               xs[1:] * N.vstack(N.tan(angs[cut1][1:]) / 2.))
        areas = N.abs(dxs * dys)
        fluxcut1 = N.hstack(enerscut1 / areas.T)

        cut2 = angs >= (2. * N.pi - dangcut)
        enerscut2 = N.histogram2d(local_coords[0], local_angs,
                                  bins=[xs, angs[cut2]], weights=eners)[0]
        dxs = N.tile(xs[1:] - xs[:-1], (len(angs[cut2]) - 1, 1))
        dys = (xs[:-1] * N.vstack(N.tan(angs[cut2][:-1])) +
               xs[1:] * N.vstack(N.tan(angs[cut2][1:]) / 2.))
        areas = N.abs(dxs * dys)
        fluxcut2 = N.hstack(enerscut2 / areas.T)

        for i in range(len(flux) // 3):
            idx = resolution // 3
            flux[resolution * i:resolution * i + idx] = fluxcut1[idx * i:idx * (i + 1)]
            flux[resolution * i + idx:resolution * i + 2 * idx] = fluxdisk[idx * i:idx * (i + 1)]
            flux[resolution * i + 2 * idx:resolution * i + 3 * idx] = fluxcut2[idx * i:idx * (i + 1)]
    else:
        flux = N.zeros(resolution**2)
        angs = N.linspace(dangcut, 2. * N.pi - dangcut, resolution + 1)
        x, y, z = self.mesh(resolution)

        xA = x[:-1, :-1]
        xB = x[:-1, 1:]
        xC = x[1:, 1:]
        xD = x[1:, :-1]
        yA = y[:-1, :-1]
        yB = y[:-1, 1:]
        yC = y[1:, 1:]
        yD = y[1:, :-1]

        a = N.sqrt((xB - xA)**2 + (yB - yA)**2)
        b = N.sqrt((xC - xB)**2 + (yC - yB)**2)
        c = N.sqrt((xD - xC)**2 + (yD - yC)**2)
        d = N.sqrt((xA - xD)**2 + (yA - yD)**2)
        p = N.sqrt((xC - xA)**2 + (yC - yA)**2)
        q = N.sqrt((xD - xB)**2 + (yD - yB)**2)

        # Quadrilateral area from the side and diagonal lengths:
        areas = 0.25 * N.sqrt(4. * p**2 * q**2 - (b**2 + d**2 - a**2 - c**2)**2)
        # Add the disk cap that is unaccounted for to the last element.
        areas[:, -1] += ((angs[1:] - angs[:-1]) / 2. * self._Re**2 -
                         b[:, -1] / 2. * self._Re * N.cos(N.arcsin(b[:, -1] / (2. * self._Re))))

        # Binning:
        for i in range(int(resolution)):
            # Separation line equation coefficients:
            a_seps = N.tile((y[i + 1] - y[i]) / (x[i + 1] - x[i]), (local_coords.shape[1], 1))
            b_seps = y[i] - a_seps * x[i]
            # Equation of the line from the origin going through the hit coordinate:
            local_a = local_coords[1] / local_coords[0]
            # Intersection with the "radial" separations:
            local_inters_x = b_seps / (N.vstack(local_a) - a_seps)
            local_inters_x[N.isnan(local_inters_x)] = self.x_cut
            local_inters_y = N.vstack(local_a) * local_inters_x
            inter_rads = N.vstack(N.sqrt(local_inters_x**2 + local_inters_y**2))

            in_wedge = N.logical_and(local_angs >= angs[i], local_angs < angs[i + 1])
            if in_wedge.any():
                # Grab the hits that are beyond the last separation but before
                # the end of the disk.
                inter_rads[:, -1] = self._Re

            in_bins = N.logical_and(N.vstack(local_rads) >= inter_rads[:, :-1],
                                    N.vstack(local_rads) < inter_rads[:, 1:])
            # flux[i*resolution:(i+1)*resolution] = N.sum(N.vstack(eners)*in_bins, axis=0)/areas[i]
            flux[i:resolution**2:resolution] = N.sum(N.vstack(eners) * in_bins, axis=0) / areas[i]

    return flux
def minimum_curvature(md, inc, azi):
    """Minimum curvature method.

    Calculate TVD, northing, easting, and dogleg, using the minimum curvature
    method. This is the inner workhorse of min_curve_method, and only
    implements the pure mathematics. As a user, you should probably use the
    min_curve_method function.

    This function considers md unitless, and assumes inc and azi are in
    radians.

    Parameters
    ----------
    md : array_like of float
        measured depth
    inc : array_like of float
        inclination in radians
    azi : array_like of float
        azimuth in radians

    Returns
    -------
    tvd : array_like of float
        true vertical depth
    northing : array_like of float
    easting : array_like of float
    dogleg : array_like of float

    Notes
    -----
    This function does not insert the surface location.
    """
    md, inc, azi = checkarrays(md, inc, azi)

    # extract upper and lower survey stations
    md_upper, md_lower = md[:-1], md[1:]
    inc_upper, inc_lower = inc[:-1], inc[1:]
    azi_upper, azi_lower = azi[:-1], azi[1:]

    cos_inc = np.cos(inc_lower - inc_upper)
    sin_inc = np.sin(inc_upper) * np.sin(inc_lower)
    cos_azi = 1 - np.cos(azi_lower - azi_upper)
    dogleg = np.arccos(cos_inc - (sin_inc * cos_azi))

    # ratio factor, correct for dogleg == 0 values to avoid division by zero
    rf = 2 / dogleg * np.tan(dogleg / 2)
    rf = np.where(dogleg == 0., 1, rf)

    md_diff = md_lower - md_upper

    upper = np.sin(inc_upper) * np.cos(azi_upper)
    lower = np.sin(inc_lower) * np.cos(azi_lower)
    northing = np.cumsum(md_diff / 2 * (upper + lower) * rf)

    upper = np.sin(inc_upper) * np.sin(azi_upper)
    lower = np.sin(inc_lower) * np.sin(azi_lower)
    easting = np.cumsum(md_diff / 2 * (upper + lower) * rf)

    tvd = np.cumsum(md_diff / 2 * (np.cos(inc_upper) + np.cos(inc_lower)) * rf)

    return tvd, northing, easting, dogleg
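# Minimal usage sketch for minimum_curvature (the survey values are made-up
# assumptions, and the call relies on the module's checkarrays helper being
# importable): a short deviated well with md in metres and inc/azi converted
# to radians before the call.
import numpy as np

md = np.array([0.0, 100.0, 200.0])
inc = np.radians(np.array([0.0, 10.0, 20.0]))
azi = np.radians(np.array([0.0, 45.0, 45.0]))

tvd, northing, easting, dogleg = minimum_curvature(md, inc, azi)
# tvd/northing/easting are cumulative offsets between survey stations; prepend
# the surface location yourself, since the function does not insert it.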
dir_create("./temp/percept")
sig = get_param_sig(tau_relative, fixed_tau, use_prior, update_only_on_error)
pdfpath = "./temp/percept/percept_%s.pdf" % sig
dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1)

plot_initial_only = False

# logger.debug("Oracle: %s" % str(oracle.y))

u = np.array([np.cos(u_theta), np.sin(u_theta)])

if plot_initial_only:
    budget = 0
    title = None
else:
    budget = 30
    title = r"initial (${\theta}$: %1.2f)" % (np.arccos(u.dot(aad.w)) * 180. / np.pi)

plot_learning(x, y, None, queried, aad, u_theta, dp, title=title,
              plot_xtau=False, plot_theta=plot_initial_only,
              plot_legends=plot_initial_only)

for iter in range(budget):
    # active learning step
    q = aad.get_query(x, queried)
    queried[q] = oracle.get_label(q)
    # logger.debug(queried)
    # logger.debug("q: %d, label: %d" % (q, queried[q]))
    if (not update_only_on_error) or queried[q] != 1:
        if update_only_on_error:
            logger.debug("updating on error...")
        aad.update(x, queried)
def verify_triangle_binding(self, distance, first_bond, angle_res):
    # Gather pairs
    n = len(self.s.part)
    angle_res = angle_res - 1

    expected_pairs = []
    for i in range(n):
        for j in range(i + 1, n, 1):
            if self.s.distance(self.s.part[i], self.s.part[j]) <= distance:
                expected_pairs.append((i, j))

    # Find triangles
    # Each element is a particle id, a bond id and two bond partners in
    # ascending order
    expected_angle_bonds = []
    for i in range(n):
        for j in range(i + 1, n, 1):
            for k in range(j + 1, n, 1):
                # References to the particles
                p_i = self.s.part[i]
                p_j = self.s.part[j]
                p_k = self.s.part[k]

                # Normalized distance vectors
                d_ij = np.copy(p_j.pos - p_i.pos)
                d_ik = np.copy(p_k.pos - p_i.pos)
                d_jk = np.copy(p_k.pos - p_j.pos)
                d_ij /= np.sqrt(np.sum(d_ij**2))
                d_ik /= np.sqrt(np.sum(d_ik**2))
                d_jk /= np.sqrt(np.sum(d_jk**2))

                if self.s.distance(p_i, p_j) <= distance and self.s.distance(p_i, p_k) <= distance:
                    id_i = first_bond._bond_id + \
                        int(np.round(np.arccos(np.dot(d_ij, d_ik)) * angle_res / np.pi))
                    expected_angle_bonds.append((i, id_i, j, k))
                if self.s.distance(p_i, p_j) <= distance and self.s.distance(p_j, p_k) <= distance:
                    id_j = first_bond._bond_id + \
                        int(np.round(np.arccos(np.dot(-d_ij, d_jk)) * angle_res / np.pi))
                    expected_angle_bonds.append((j, id_j, i, k))
                if self.s.distance(p_i, p_k) <= distance and self.s.distance(p_j, p_k) <= distance:
                    id_k = first_bond._bond_id + \
                        int(np.round(np.arccos(np.dot(-d_ik, -d_jk)) * angle_res / np.pi))
                    expected_angle_bonds.append((k, id_k, i, j))

    # Gather actual pairs and actual triangles
    found_pairs = []
    found_angle_bonds = []
    for i in range(n):
        for b in self.s.part[i].bonds:
            if len(b) == 2:
                self.assertEqual(b[0]._bond_id, self.H._bond_id)
                found_pairs.append(tuple(sorted((i, b[1]))))
            elif len(b) == 3:
                partners = sorted(b[1:])
                found_angle_bonds.append((i, b[0]._bond_id, partners[0], partners[1]))
            else:
                raise Exception("There should only be two- and three-particle bonds")

    # The order between expected and found bonds does not always match
    # because collisions occur in random order. Sort everything.
    found_pairs = sorted(found_pairs)
    found_angle_bonds = sorted(found_angle_bonds)
    expected_angle_bonds = sorted(expected_angle_bonds)

    self.assertEqual(expected_pairs, found_pairs)

    if not expected_angle_bonds == found_angle_bonds:
        # Verbose info
        print("expected:", expected_angle_bonds)
        missing = []
        for b in expected_angle_bonds:
            if b in found_angle_bonds:
                found_angle_bonds.remove(b)
            else:
                missing.append(b)
        print("missing", missing)
        print("extra:", found_angle_bonds)
        print()
    self.assertEqual(expected_angle_bonds, found_angle_bonds)
omega = -B0 * gamma
ms = IntBloch(m0, lambda t: Bcirc(t, localB0, B1, omega), times, gamma)
print("vector")
print(ms)
finalMs.append(ms)
# print("M")
# print(ms)
# mTnorm = np.linalg.norm(ms[0:2])
# phase = np.degrees(np.arctan2(ms[1], ms[0]))
# print("phase")
# print(phase)
# phase = phase - phi0
# phase = np.tan(np.radians(phase))
# print(phase)
theta = np.degrees(np.arccos(ms[2]))  # np.arctan2(mTnorm, ms[2])
thetas.append(theta)
# phases.append(phase)

dOmega = (G * x * gamma)
omegaMag = np.power((G * x * gamma)**2 + (gamma * B1)**2, 0.5)
# sinAngle2 = (2. / (1. + (G * x / B1)**2))
# expTheta = 1. - sinAngle2 * (np.sin(0.5 * times[-1] * omegaMag))**2
# expThetas.append(np.degrees(np.arccos(expTheta)))
# expThetas.append(-(expTheta - 1))
sinterm = np.abs((gamma * B1 * times[-1] / 2) * np.sinc(omegaMag * times[-1] / (np.pi * 2.)))
expThetas.append(2 * np.degrees(np.arcsin(sinterm)))
# expThetas3.append(np.degrees(2 * sinterm))
import os
import sys
from numpy import pi, arccos

# Load unit conversion
sys.path.append(os.path.abspath("./pyprop/pyprop/utilities"))
import units
from units import ElectricFieldAtomicFromIntensitySI as field_from_intensity
from units import AngularFrequencyAtomicFromWavelengthSI as freq_from_wavelength
from units import IntensitySIFromElectricFieldAtomic as intensity_from_field

# Unit conversion factors
femtosec_to_au = 1e-15 / units.constantsAU.time

# Pulse duration from intensity full width at half maximum for a cos**2 pulse
fwhm_intensity = pi / arccos(0.5**0.25) / 2.0

# Converts wavelength in nm -> time of one cycle in a.u.
cycletime_from_wavelength = lambda l: 2 * pi / freq_from_wavelength(l)

# Converts frequency in a.u. -> time of one cycle in a.u.
cycletime_from_frequency = lambda f: 2 * pi / f

# Ponderomotive energy
# ponderomotive_energy = lambda I, omega: I / (4.0 * omega**2)
ponderomotive_energy = lambda E0, omega: E0**2 / (4.0 * omega**2)

# eV -> a.u.
eV_to_au = 3.674932540e-2
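# Sanity-check sketch for the cos**2 FWHM factor above (pure numpy, no pyprop
# required; the pulse duration T is an arbitrary assumption): for an envelope
# E(t) = E0 * cos(pi * t / T)**2 the intensity ~ cos**4 drops to half its peak
# where cos(pi * t / T) = 0.5**0.25, so FWHM = 2 * T * arccos(0.5**0.25) / pi
# and hence T = fwhm_intensity * FWHM.
import numpy as np

T = 100.0                                   # assumed total pulse duration (arbitrary units)
t_half = T / np.pi * np.arccos(0.5**0.25)   # time at which intensity = 0.5 * peak
fwhm = 2 * t_half
factor = np.pi / np.arccos(0.5**0.25) / 2.0
assert abs(factor * fwhm - T) < 1e-9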
def get_metrics(endmembersPredicted, image, exec_id, abundancesPredicted=None,
                path_abundances_GT=None, show_images=False):
    import matplotlib
    if not show_images:
        matplotlib.use('Agg')

    K = endmembersPredicted.shape[1]
    endmembersGT = scipy.io.loadmat(path_abundances_GT)['M']
    if image == 'cuprite':
        bands = scipy.io.loadmat(path_abundances_GT)['slctBnds'][0, :]
        endmembersGT = endmembersGT[bands]
        softmaxed = softmax(endmembersGT.T)
        endmembersGT = softmaxed.T

    rmse = 0.0
    sad = 0.0
    if path_abundances_GT is not None:
        # Pair each predicted endmember with its closest ground-truth endmember
        # before computing the SAD
        endm_s1 = endmembersPredicted
        endm_gt = endmembersGT
        dists = []
        for col in range(endm_s1.shape[1]):
            act_sim = []
            row = endm_s1[:, col]
            for col2 in range(endm_gt.shape[1]):
                row2 = endm_gt[:, col2]
                act_sim.append(sp_dist.cosine(row, row2))
            dists.append(act_sim)
        dists = np.array(dists)

        new_classes = [0] * K
        en2 = copy.deepcopy(endmembersPredicted)
        for i in range(K):
            (fil, col) = np.unravel_index(dists.argmin(), dists.shape)
            endmembersPredicted[:, col] = en2[:, fil]
            new_classes[fil] = col
            dists[:, col] = 100000
            dists[fil, :] = 100000
        del en2, new_classes, dists, endm_gt, endm_s1

        from numpy.linalg import norm
        cos_sim = 0
        for i in range(K):
            b = endmembersGT[:, i]
            a = endmembersPredicted[:, i]
            cos_sim += np.arccos(np.dot(a, b) / (norm(a) * norm(b)))
        sad = cos_sim / float(K)

        if 'A' in scipy.io.loadmat(path_abundances_GT).keys():
            abundancesGT = scipy.io.loadmat(path_abundances_GT)['A'].T
            abundancesGT = np.transpose(
                abundancesGT.reshape(abundancesPredicted.shape[1],
                                     abundancesPredicted.shape[0],
                                     abundancesGT.shape[1]),
                (1, 0, 2))

            # Pair predicted/true abundance maps before computing the RMSE
            image_s1 = abundancesPredicted.reshape(-1, K)
            image_gt = abundancesGT.reshape(-1, K)
            dists = []
            for col in range(image_s1.shape[1]):
                act_sim = []
                row = image_s1[:, col]
                for col2 in range(image_gt.shape[1]):
                    row2 = image_gt[:, col2]
                    act_sim.append(sp_dist.cosine(row, row2))
                dists.append(act_sim)
            dists = np.array(dists)

            new_classes = [0] * K
            ab2 = copy.deepcopy(abundancesPredicted)
            for i in range(K):
                (fil, col) = np.unravel_index(dists.argmin(), dists.shape)
                abundancesPredicted[:, :, col] = ab2[:, :, fil]
                new_classes[fil] = col
                dists[:, col] = 100000
                dists[fil, :] = 100000
            del ab2, new_classes, dists, image_gt, image_s1

            rmse = np.sqrt(mean_squared_error(abundancesGT.reshape(-1, K),
                                              abundancesPredicted.reshape(-1, K)))

            mosaicPred = abundancesPredicted[:, :, 0]
            mosaicGT = abundancesGT[:, :, 0]
            for i in range(1, K):
                mosaicPred = np.hstack((mosaicPred, abundancesPredicted[:, :, i]))
                mosaicGT = np.hstack((mosaicGT, abundancesGT[:, :, i]))
            mosaicFinal = np.vstack((mosaicPred, mosaicGT))
            if show_images:
                plt.imshow(mosaicFinal)
                plt.show()
                plt.clf()
            else:
                plt.imsave('outputs/images/abundances_' + image + '_' + exec_id + '.png',
                           mosaicFinal)
        else:
            mosaicPred = abundancesPredicted[:, :, 0]
            for i in range(1, K):
                mosaicPred = np.hstack((mosaicPred, abundancesPredicted[:, :, i]))
            if show_images:
                plt.imshow(mosaicPred)
                plt.show()
                plt.clf()
            else:
                plt.imsave('outputs/images/abundances_' + image + '_' + exec_id + '.png',
                           mosaicPred)

    if image == 'cuprite':
        endmembersPredicted = endmembersPredicted[3:, :]
    plt.plot(endmembersPredicted)
    if show_images:
        plt.show()
    else:
        plt.savefig('outputs/images/endmembers_' + image + '_' + exec_id + '.png',
                    bbox_inches='tight', pad_inches=0.2, dpi=200)

    return rmse, sad
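# Standalone sketch of the per-endmember spectral angle distance (SAD) used in
# get_metrics, with a made-up spectrum (pure numpy, no ground-truth file
# needed): SAD(a, b) = arccos(<a, b> / (||a|| * ||b||)), averaged over the K
# endmembers in the function above.
import numpy as np

def spectral_angle(a, b):
    cosine = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return np.arccos(np.clip(cosine, -1.0, 1.0))

# Identical spectra give 0; a scaled copy also gives 0, i.e. SAD ignores amplitude.
a = np.array([0.2, 0.4, 0.6, 0.8])
assert spectral_angle(a, 3.0 * a) < 1e-6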
def eucl2deg(eucl):
    raw = (180 / np.pi) * np.arccos(np.clip(1 - 0.5 * eucl**2, -1, 1))
    return np.minimum(raw, 180 - raw)
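# Quick check of eucl2deg (the inputs are illustrative assumptions): for unit
# vectors the chord length ||u - v|| relates to the angle by
# ||u - v||**2 = 2 - 2*cos(theta), which is what the 1 - 0.5*eucl**2 term
# inverts; the final minimum folds the result into [0, 90] degrees.
import numpy as np

u = np.array([1.0, 0.0])
v = np.array([np.cos(np.radians(60.0)), np.sin(np.radians(60.0))])
chord = np.linalg.norm(u - v)
print(eucl2deg(chord))  # ~60.0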
def get_wavefront_parallel(data, aim, i, t, side, PAAM_ang, ret, mode='opposite',
                           precision=0, ksi=[0, 0], angles=False):
    [i_self, i_left, i_right] = utils.i_slr(i)

    if mode == 'opposite':
        if side == 'l':
            tdel = data.L_sl_func_tot(i_self, t)
            if data.calc_method == 'Waluschka':
                tdel0 = tdel
            elif data.calc_method == 'Abram':
                tdel0 = 0
            if angles == False:
                tele_ang = aim.tele_l_ang(i_self, t + tdel0)
            else:
                tele_ang = angles
            coor_start = beam_coor_out(data, i_self, t, tele_ang, PAAM_ang, aim.offset_tele['l'])
            coor_end = aim.tele_r_coor(i_left, t + tdel)
            start = aim.tele_l_start(i_self, t + tdel0)
            end = aim.tele_r_start(i_left, t + tdel) + coor_end[1] * ksi[1] + coor_end[2] * ksi[0]

        elif side == 'r':
            tdel = data.L_sr_func_tot(i_self, t)
            if data.calc_method == 'Waluschka':
                tdel0 = tdel
            elif data.calc_method == 'Abram':
                tdel0 = 0
            if angles == False:
                tele_ang = aim.tele_r_ang(i_self, t + tdel0)
            else:
                tele_ang = angles
            coor_start = beam_coor_out(data, i_self, t, tele_ang, PAAM_ang, aim.offset_tele['r'])
            coor_end = aim.tele_l_coor(i_right, t + tdel)
            start = aim.tele_r_start(i_self, t + tdel0)
            end = aim.tele_l_start(i_right, t + tdel) + coor_end[1] * ksi[1] + coor_end[2] * ksi[0]

        [zoff, yoff, xoff] = LA.matmul(coor_start, end - start)

        if precision == 0:
            R = zoff  # Not precise
        elif precision == 1:
            try:
                [piston, z_extra] = wfe.z_solve(xoff, yoff, zoff, ret='all')
            except:
                [piston, z_extra] = [np.nan, np.nan]
            R = wfe.R(piston)

        R_vec = np.array([(R**2 - xoff**2 - yoff**2)**0.5, yoff, xoff])
        tele_vec = LA.matmul(coor_start, -coor_end[0])
        angx_R = np.sign(R_vec[2]) * abs(np.arctan(R_vec[2] / R_vec[0]))
        angy_R = np.sign(R_vec[1]) * abs(np.arctan(R_vec[1] / R_vec[0]))
        angx_tele = np.sign(tele_vec[2]) * abs(np.arctan(tele_vec[2] / tele_vec[0]))
        angy_tele = np.sign(tele_vec[1]) * abs(np.arctan(tele_vec[1] / tele_vec[0]))
        angx = (angx_tele - angx_R)
        angy = (angy_tele - angy_R)

    elif mode == 'self':
        if side == 'l':
            tdel = data.L_rl_func_tot(i_self, t)
            if data.calc_method == 'Waluschka':
                tdel0 = tdel
            elif data.calc_method == 'Abram':
                tdel0 = 0
            if angles == False:
                tele_ang = aim.tele_r_ang(i_left, t - tdel)
                tele_ang_end = aim.tele_l_ang(i_self, t - tdel0)
                PAAM_ang = aim.beam_r_ang(i_left, t - tdel)
            elif len(angles) >= 2:
                tele_ang_end = angles[0]
                tele_ang = angles[2]
                PAAM_ang = aim.beam_r_ang(i_left, t - tdel)
            coor_start = beam_coor_out(data, i_left, t - tdel, tele_ang, PAAM_ang, aim.offset_tele['r'])
            coor_end = coor_tele(data, i_self, t, tele_ang_end)
            start = LA.unit(coor_start[0]) * data.L_tele + data.putp(i_left, t - tdel)
            end = (LA.unit(coor_end[0]) * data.L_tele + data.putp(i_self, t - tdel0)
                   + coor_end[1] * ksi[1] + coor_end[2] * ksi[0])

        elif side == 'r':
            tdel = data.L_rr_func_tot(i_self, t)
            if data.calc_method == 'Waluschka':
                tdel0 = tdel
            elif data.calc_method == 'Abram':
                tdel0 = 0
            if angles == False:
                tele_ang = aim.tele_l_ang(i_right, t - tdel)
                tele_ang_end = aim.tele_r_ang(i_self, t - tdel0)
                PAAM_ang = aim.beam_l_ang(i_right, t - tdel)
            elif len(angles) >= 2:
                tele_ang_end = angles[0]
                tele_ang = angles[2]
                PAAM_ang = aim.beam_l_ang(i_right, t - tdel)
            coor_start = beam_coor_out(data, i_right, t - tdel, tele_ang, PAAM_ang, aim.offset_tele['l'])
            coor_end = coor_tele(data, i_self, t, tele_ang_end)
            start = LA.unit(coor_start[0]) * data.L_tele + data.putp(i_right, t - tdel)
            end = (LA.unit(coor_end[0]) * data.L_tele + data.putp(i_self, t - tdel0)
                   + coor_end[1] * ksi[1] + coor_end[2] * ksi[0])

        [zoff, yoff, xoff] = LA.matmul(coor_start, end - start)
        out = OUTPUT(aim)

        if precision == 0:
            R = zoff  # Not precise
        elif precision == 1:
            try:
                [piston, z_extra] = out.z_solve(xoff, yoff, zoff, ret='all')
            except:
                [piston, z_extra] = [np.nan, np.nan]
            R = out.R(piston)

        R_vec = np.array([(R**2 - xoff**2 - yoff**2)**0.5, yoff, xoff])
        R_vec_origin = LA.matmul(np.linalg.inv(coor_start), R_vec)
        R_vec_tele_rec = LA.matmul(coor_end, -R_vec_origin)
        angx = np.arctan(abs(R_vec_tele_rec[2] / R_vec_tele_rec[0])) * np.sign(R_vec_tele_rec[2])
        angy = np.arctan(abs(R_vec_tele_rec[1] / R_vec_tele_rec[0])) * np.sign(R_vec_tele_rec[1])

    if ret == 'angy':
        return angy
    elif ret == 'angx':
        return angx
    elif ret == 'tilt':
        return (angx**2 + angy**2)**0.5
    elif ret == 'xoff':
        return xoff
    elif ret == 'yoff':
        return yoff
    elif ret == 'r':
        return (xoff**2 + yoff**2)**0.5
    elif ret == 'all':
        ret_val = {}
        ret_val['start'] = start
        ret_val['end'] = end
        ret_val['zoff'] = zoff
        ret_val['yoff'] = yoff
        ret_val['xoff'] = xoff
        ret_val['coor_start'] = coor_start
        ret_val['coor_end'] = coor_end
        ret_val['bd_original_frame'] = np.array(coor_start[0])
        ret_val['bd_receiving_frame'] = LA.matmul(coor_end, ret_val['bd_original_frame'])
        ret_val['angx_func_rec'] = angx
        ret_val['angy_func_rec'] = angy
        ret_val['R_vec_tele_rec'] = R_vec_tele_rec
        # ret_val['tilt'] = np.arccos(R_vec_tele_rec[0] / np.linalg.norm(R_vec_tele))
        # ret_val['tilt'] = (angx**2 + angy**2)**0.5
        # ret_val['tilt'] = LA.angle(R_vec_tele, (angx**2 + angy**2)**0.5)
        if precision == 1:
            ret_val['piston'] = piston
            ret_val['z_extra'] = z_extra
        ret_val['R'] = R
        ret_val['R_vec_beam_send'] = R_vec
        ret_val['R_vec_origin'] = R_vec_origin
        ret_val['r'] = (xoff**2 + yoff**2)**0.5

        FOV_beamline = np.arccos(-ret_val['bd_receiving_frame'][0] /
                                 np.linalg.norm(ret_val['bd_receiving_frame']))
        FOV_wavefront = LA.angle(-R_vec_origin, coor_end[0])
        FOV_position = LA.angle(start - end, coor_end[0])
        ret_val['tilt'] = FOV_wavefront
        ret_val['FOV_beamline'] = FOV_beamline
        ret_val['FOV_wavefront'] = FOV_wavefront
        ret_val['FOV_position'] = FOV_position

        return ret_val