def update(self, p1x, p1y, p2x, p2y, width, height, mat=0):
    '''Update the quaternion with a new rotation position derived
    from the first point (p1) and the second point (p2).  The mat
    parameter is not currently used.'''
    if p1x == p2x and p1y == p2y:
        self.quat = quaternion(1, 0, 0, 0)
    else:
        # First, figure out z-coordinates for projection of p1 and p2 to
        # deformed sphere
        p1x_u = self.scale*p1x/width - 1.0
        p1y_u = 1.0 - self.scale*p1y/height
        p2x_u = self.scale*p2x/width - 1.0
        p2y_u = 1.0 - self.scale*p2y/height
        P1 = (p1x_u, p1y_u, self.__track_project_to_sphere(p1x_u, p1y_u))
        P2 = (p2x_u, p2y_u, self.__track_project_to_sphere(p2x_u, p2y_u))
        a = [(P2[1]*P1[2]) - (P2[2]*P1[1]),
             (P2[2]*P1[0]) - (P2[0]*P1[2]),
             (P2[0]*P1[1]) - (P2[1]*P1[0])]
        # Figure out how much to rotate around that axis.
        d = map(lambda x, y: x - y, P1, P2)
        t = math.sqrt(d[0]**2 + d[1]**2 + d[2]**2) / (2.0 * self.size)
        # Avoid problems with out-of-control values...
        t = max(min(t, 1.0), -1.0)
        scale = t*math.sqrt(a[0]**2 + a[1]**2 + a[2]**2)
        q = map(lambda x, y: x*y, a, [scale]*3) + [math.sqrt(1.0 - t**2)]
        self.quat = quaternion(q[0], q[1], q[2], q[3])
def gao(n):
    # Continued-fraction expansion of sqrt(n); returns the smallest x with
    # x^2 - n*y^2 == 1 (Pell's equation), or 0 if n is a perfect square.
    m = int(math.sqrt(n))
    if m * m == n:
        return 0
    ha = 1; ka = 0
    d = m
    a = n
    b = -m
    c = 1
    hb = d; kb = 1
    if hb * hb - n * kb * kb == 1:
        return hb
    while True:
        nc = a - b * b
        nc //= c    # exact integer division in the continued-fraction step
        nd = int((math.sqrt(a) - b) / nc)
        nb = -b - nd * nc
        c = nc; d = nd; b = nb
        hc = d * hb + ha
        kc = d * kb + ka
        ha = hb
        ka = kb
        hb = hc
        kb = kc
        if hc * hc - n * kc * kc == 1:
            return hc
    return 0
def test_executer(): f = Feuille() o = f.objets f.executer("A = (1, 2)") f.executer("A.x += 1") assert(o.A.x == 2) f.executer("A' = 3, 4") f.executer("s = [A A']") f.executer("I = Milieu(s)") assertAlmostEqual(o.I.xy, (2.5, 3)) f.executer("del") assert("I" not in o.noms) assert("A_prime" in o.noms) f.executer("del") f.executer("del") assert("A_prime" not in o.noms) f.executer("= (1, 2)") assert(o.M1.coordonnees == (1, 2)) f.executer("txt0 = `Bonjour !`") f.executer(r"txt1 = `$P\`ere et m\`ere ont un accent grave.$`") f.executer("chaine_vide = ``") assert(o.txt0.texte == "Bonjour !") assert(o.txt1.texte == r"$P\`ere et m\`ere ont un accent grave.$") assert(o.chaine_vide.texte == "") f.executer("M = (5, 7)") f.executer("C = _") assert(o.C.x == 5) f.executer("=((i,sqrt(i)) for i in (3,4,5,6))") assert(o.M2.xy == (3, sqrt(3))) assert(o.M3.xy == (4, sqrt(4))) assert(o.M4.xy == (5, sqrt(5))) assert(o.M5.xy == (6, sqrt(6)))
def rechne(self): q = self.q.value() T = self.vext.value()*1000.+self.term.value()*(float(self.mout.value())/self.min.value()+q)*1.e6*(self.t.value()/100.+1.) a = self.a.value()/1000. xi = 30000.*q/T/a**2 chi = math.sqrt(2.*self.mout.value()*T)/q*math.sqrt(1.660538921e-27 / 1.602176565e-19) for i in range(0,4): self.percent[i].setValue(ky[i]/xi*100.) self.ast[i].setValue((kx[i]-ky[i])/xi*100.) if (self.GIC.isChecked()): radius[2] = 1.545 if (self.TOF.isChecked()): radius[2] = 1.545*30./20. if (self.SIG.isChecked()): radius[2] = 1.545*30./(-15.) if (self.AFD.isChecked()): radius[2] = -1.545 for i in range(1,4): feld = chi/radius[i] self.mfeld[i].setValue(feld) self.mstrom[i].setValue(self.magnetstrom(i,feld)) feld = math.sqrt(2.*self.min.value()*self.vext.value()*1000.)*math.sqrt(1.660538921e-27 / 1.602176565e-19)/radius[0] self.mfeld[0].setValue(feld) self.mstrom[0].setValue(self.magnetstrom(0,feld)) self.espannung[0].setValue(self.vext.value()*.05325/.469) self.espannung[1].setValue(T/q*.032/2.816/1000.)
def legIK(self, X, Y, Z, resolution):
    """ Compute leg servo positions. """
    ans = [0, 0, 0, 0]    # (coxa, femur, tibia)
    try:
        # first, make this a 2DOF problem... by solving coxa
        ans[0] = radToServo(atan2(X, Y), resolution)
        trueX = int(sqrt(sq(X) + sq(Y))) - self.L_COXA
        im = int(sqrt(sq(trueX) + sq(Z)))    # length of imaginary leg
        # get femur angle above horizon...
        q1 = -atan2(Z, trueX)
        d1 = sq(self.L_FEMUR) - sq(self.L_TIBIA) + sq(im)
        d2 = 2 * self.L_FEMUR * im
        q2 = acos(d1 / float(d2))
        ans[1] = radToServo(q1 + q2, resolution)
        # and tibia angle from femur...
        d1 = sq(self.L_FEMUR) - sq(im) + sq(self.L_TIBIA)
        d2 = 2 * self.L_TIBIA * self.L_FEMUR
        ans[2] = radToServo(acos(d1 / float(d2)) - 1.57, resolution)
    except:
        if self.debug:
            print "LegIK FAILED"
        return [1024, 1024, 1024, 0]
    if self.debug:
        print "LegIK:", ans
    return ans
def cal_delta_X10811(X):
    # X is the array of fringe positions; compute δx
    global DELTA_X, UA_10DELTA_X, UB_10DELTA_X, U_10DELTA_X, U_DELTA_X
    sum = 0
    for x in range(len(X)//2):
        sum += X[x + len(X)//2] - X[x]
    delta_x = sum/(len(X)//2)**2
    # the upper-case constants hold the formatted values printed into the TeX file
    DELTA_X = ToScience(delta_x)
    # compute the uncertainties
    sum = 0
    for x in range(len(X)//2):
        sum += (10*(X[x + len(X)//2] - X[x]) - 10*delta_x)**2
    ua_10delta_x = math.sqrt(sum/(len(X)*(len(X)-1)))
    ub_10delta_x = 0.01/(2*math.sqrt(3))
    u_delta_x = math.sqrt(ua_10delta_x**2 + ub_10delta_x**2)/1000
    UA_10DELTA_X = ToScience(ua_10delta_x)
    UB_10DELTA_X = ToScience(ub_10delta_x)
    u_10delta_x = math.sqrt(ua_10delta_x**2 + ub_10delta_x**2)
    U_10DELTA_X = ToScience(u_10delta_x)
    u_delta_x = u_10delta_x / 10
    U_DELTA_X = ToScience(u_delta_x)
    # delta_x is the fringe spacing; u_delta_x is the uncertainty of δx
    return delta_x, u_delta_x
def simplesnr(f, h, i=None, years=1, noisemodel=None, includewd=None):
    if i is None:
        h0 = h * math.sqrt(16.0/5.0)    # rms average over inclinations
    else:
        h0 = h * math.sqrt((1 + math.cos(i)**2)**2 + (2*math.cos(i))**2)
    return h0 * math.sqrt(years * 365.25*24*3600) / math.sqrt(lisanoise(f, noisemodel, includewd))
def cosine_sim(util, id1, id2, th=3):
    num = 0
    # get items util[id1] and util[id2] share in common
    shared = set(util[id1].keys()).intersection(util[id2].keys())
    # optimization to not compute similarity between items
    # that don't meet threshold
    if len(shared) < th:
        return (0.0, len(shared))
    firstmag = 0
    secondmag = 0
    # calculate dot product and magnitudes of shared items
    for item in shared:
        num += util[id1][item] * util[id2][item]
        firstmag += util[id1][item]**2
        secondmag += util[id2][item]**2
    # prevent denom == 0
    firstmag = 1 if firstmag == 0 else firstmag
    secondmag = 1 if secondmag == 0 else secondmag
    # calculate magnitude of shared items in util[id2]
    denom = math.sqrt(firstmag) * math.sqrt(secondmag)
    return ((num/denom + 1)/2, len(shared))
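# Hedged usage sketch (the utility-matrix layout is assumed from how the
# function indexes it, i.e. util[user][item] -> rating; the names are made up):
util = {"u1": {"a": 1.0, "b": 2.0}, "u2": {"a": 1.0, "b": 2.0}}
sim, n_shared = cosine_sim(util, "u1", "u2", th=1)
# identical ratings over the 2 shared items -> sim is approximately 1.0
# after the (x + 1)/2 rescaling of the cosine from [-1, 1] to [0, 1]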
def standardMC_european_option(K, T, R, V, S0, N, option_type, path_num=10000):
    dt = T / N
    sigma = V
    drift = math.exp((R - 0.5 * sigma * sigma) * dt)
    sigma_sqrt = sigma * math.sqrt(dt)
    exp_RT = math.exp(-R * T)
    european_payoff = []
    for i in xrange(path_num):
        former = S0
        for j in xrange(int(N)):
            former = former * drift * math.exp(sigma_sqrt * numpy.random.normal(0, 1))
        european_option = former
        if option_type == 1.0:
            european_payoff_call = exp_RT * max(european_option - K, 0)
            european_payoff.append(european_payoff_call)
        elif option_type == 2.0:
            european_payoff_put = exp_RT * max(K - european_option, 0)
            european_payoff.append(european_payoff_put)
    # Standard Monte Carlo
    p_mean = numpy.mean(european_payoff)
    p_std = numpy.std(european_payoff)
    p_confmc = (p_mean - 1.96 * p_std / math.sqrt(path_num),
                p_mean + 1.96 * p_std / math.sqrt(path_num))
    return p_mean, p_std, p_confmc
def InAnscombeTrans(imdata, opt='exact'):
    """
    The Inverse Anscombe Transform function.

    Parameters
    ----------
    imdata: numpy array
        The input image array
    opt: str
        The option for the inverse transform. Default is 'exact'.
    """
    imdata = np.float32(imdata)
    if opt == 'algebra':
        z = imdata*imdata/4 - 3/8
    if opt == 'asymptotic':
        z = imdata*imdata/4 - 1/8
    if opt == 'exact':
        z = imdata*imdata/4 + math.sqrt(3/2)/imdata/4 - 11/8/(imdata*imdata) + \
            5/8*math.sqrt(3/2)/(imdata*imdata*imdata) - 1/8
    z = np.maximum(z, np.zeros(imdata.shape, dtype=np.float32))
    if opt == 'MMSE':
        print('sth')
    return np.uint16(z)
def sieve(start=start,stop=stop): ''' Given a set of positive integers, remove all integer products of known primes, since they are thus not prime. When p is less than sqrt(stop) + 1, then all the remaining integers the current range of integers are prime. Recurse until the range containing p_target is processed and return that prime. start: starting integer stop: ending integer ''' integers = [i for i in xrange(start,stop)] if len(primes) == 0: # base case p = start # initial working prime else: for p in primes: # first check primes we know if p < int(sqrt(stop) + 1): remove_factors(p,integers) p = integers[0] # first integer left is now prime while p < int(sqrt(stop) + 1): # then sift through the new primes remove_factors(p,integers) # remove all factors of p from integers p = integers[integers.index(p) + 1] # next integer in integers is now prime primes.extend(integers) # all remaining integers are primes if len(primes) < target: # need to look for more primes sieve(stop,2*stop) # look at the next range of integers else: # found p_target print('The %dst prime is %d' % (target,primes[target + 1]))
def LagrangianDiffusion(particle, fieldset, time, dt): if particle.active == 1: to_lat = 1 / 1000. / 1.852 / 60. to_lon = to_lat / math.cos(particle.lat*math.pi/180) r_var = 1/3. #Rx = np.random.uniform(-1., 1.) #Ry = np.random.uniform(-1., 1.) Rx = random.uniform(-1., 1.) Ry = random.uniform(-1., 1.) #dK = RK4(fieldset.dK_dx, fieldset.dK_dy, particle.lon, particle.lat, time, dt) dKdx, dKdy = (fieldset.dK_dx[time, particle.lon, particle.lat, particle.depth], fieldset.dK_dy[time, particle.lon, particle.lat, particle.depth]) #half_dx = 0.5 * dKdx * dt * to_lon #half_dy = 0.5 * dKdy * dt * to_lat #print(particle.lon + half_dx) #print(particle.lat + half_dy) #K = RK4(fieldset.K, fieldset.K, particle.lon + half_dx, particle.lat + half_dy, time, dt) Kfield = fieldset.K[time, particle.lon, particle.lat, particle.depth] Rx_component = Rx * math.sqrt(2 * Kfield * dt / r_var) * to_lon Ry_component = Ry * math.sqrt(2 * Kfield * dt / r_var) * to_lat CorrectionX = dKdx * dt * to_lon CorrectionY = dKdy * dt * to_lat #print(Rx_component) #print(Ry_component) Dx = Rx_component Dy = Ry_component Cx = CorrectionX Cy = CorrectionY
def MoveOffLand(particle, fieldset, time, dt):
    onland = fieldset.LandMask[0, particle.lon, particle.lat, particle.depth]
    if onland == 1:
        oldlon = particle.lon - particle.Ax - particle.Dx - particle.Cx - particle.Vx
        oldlat = particle.lat - particle.Ay - particle.Dy - particle.Cy - particle.Vy
        lat_convert = 1 / 1000. / 1.852 / 60.    # metres -> degrees latitude
        lon_convert = lat_convert / math.cos(oldlat*math.pi/180)    # metres -> degrees longitude
        Kfield_new = fieldset.K[time, oldlon, oldlat, particle.depth]
        r_var_new = 1/3.
        Dx_component = math.sqrt(2 * Kfield_new * dt / r_var_new) * lon_convert
        Dy_component = math.sqrt(2 * Kfield_new * dt / r_var_new) * lat_convert
        count = 0
        particle.In_Loop = 0
        while onland > 0:
            #return ErrorCode.ErrorOutOfBounds
            #print("particle on land at %s|%s" % (particle.lon, particle.lat))
            particle.lon -= particle.Dx
            particle.lat -= particle.Dy
            Rx_new = random.uniform(-1., 1.)
            Ry_new = random.uniform(-1., 1.)
            particle.Dx = Dx_component * Rx_new
            particle.Dy = Dy_component * Ry_new
            particle.lon += particle.Dx
            particle.lat += particle.Dy
            onland = fieldset.LandMask[0, particle.lon, particle.lat, particle.depth]
            #print("attempting move to %s|%s" % (particle.lon, particle.lat))
            #print("onland now = %s" % onland)
            count += 1
            particle.In_Loop += 1
            if count > 100:
                particle.lon -= particle.Ax + (particle.Dx + particle.Cx + particle.Vx)# * to_lon
                particle.lat -= particle.Ay + (particle.Dy + particle.Cy + particle.Vy)# * to_lat
                particle.Ax = particle.Ay = particle.Dx = particle.Dy = particle.Cx = particle.Cy = particle.Vx = particle.Vy = 0.0
                onland = 0
def relIsoMethod(histogramForEstimation, function='expo',
                 fitRange=(0.3, 1.6), signalRegion=(0., 0.1)):
    histogramForEstimation = histogramForEstimation.Clone('tmp')

    # investigate them
    binWidthOfOriginalHistoram = 0.01
    rebinOfOriginalHistogram = 10

    estimate = 0
    relativeErrorSquared = 0
    histogramForEstimation.Rebin(rebinOfOriginalHistogram)

    fit = None
    fit = performFit(histogramForEstimation, function, fitRange)
    if fit:
        estimate = fit.Integral(signalRegion[0], signalRegion[1]) / (binWidthOfOriginalHistoram * rebinOfOriginalHistogram)
        for parErr in range(0, fit.GetNumberFreeParameters()):
            par = fit.GetParameter(parErr)
            err = fit.GetParError(parErr)
            if not par == 0:
                relativeErrorSquared += (err / par) ** 2

    result = {'estimate': estimate,
              'absoluteError': sqrt(relativeErrorSquared) * estimate,
              'relativeError': sqrt(relativeErrorSquared),
              'fit': fit}
    return result
def parmeq(G0, G, GB, w0, Dw):
    beta = math.tan(Dw/2) * math.sqrt(abs(GB**2 - G0**2)) / math.sqrt(abs(G**2 - GB**2))
    #global aeq,beq
    beq = [(G0 + G*beta), -2*G0*math.cos(w0), (G0 - G*beta)]
    beq = np.array(beq) / (1+beta)
    aeq = np.array([1, -2*math.cos(w0)/(1+beta), (1-beta)/(1+beta)])
    return beq, aeq
def reviseDistance(self, currentDistance, currentCount):
    if currentCount <= 0:
        return self.maxDistance
    newDistance = float(currentDistance) * math.sqrt(self.targetCount) / math.sqrt(currentCount)
    if newDistance > self.maxDistance:
        return self.maxDistance
    return newDistance
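# Worked example with hypothetical values (not from the original code): with
# self.targetCount = 100 and self.maxDistance = 500, a query that returned
# currentCount = 25 hits at currentDistance = 50 is revised to
# 50 * sqrt(100) / sqrt(25) = 100, i.e. the search radius scales by
# sqrt(targetCount / currentCount) and is capped at maxDistance.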
def _bezierpolyrange(x0, x1, x2, x3): tc = [0, 1] a = x3 - 3*x2 + 3*x1 - x0 b = 2*x0 - 4*x1 + 2*x2 c = x1 - x0 s = b*b - 4*a*c if s >= 0: if b >= 0: q = -0.5*(b+math.sqrt(s)) else: q = -0.5*(b-math.sqrt(s)) try: t = q*1.0/a except ZeroDivisionError: pass else: if 0 < t < 1: tc.append(t) try: t = c*1.0/q except ZeroDivisionError: pass else: if 0 < t < 1: tc.append(t) p = [(((a*t + 1.5*b)*t + 3*c)*t + x0) for t in tc] return min(*p), max(*p)
def estimateBIsochrone(R,z,pot=None): """ NAME: estimateBIsochrone PURPOSE: Estimate a good value for the scale of the isochrone potential by matching the slope of the rotation curve INPUT: R,z = coordinates (if these are arrays, the median estimated delta is returned, i.e., if this is an orbit) pot= Potential instance or list thereof OUTPUT: b if 1 R,Z given bmin,bmedian,bmax if multiple R given HISTORY: 2013-09-12 - Written - Bovy (IAS) """ if pot is None: #pragma: no cover raise IOError("pot= needs to be set to a Potential instance or list thereof") if isinstance(R,nu.ndarray): bs= nu.array([estimateBIsochrone(R[ii],z[ii],pot=pot) for ii in range(len(R))]) return (nu.amin(bs[True-nu.isnan(bs)]), nu.median(bs[True-nu.isnan(bs)]), nu.amax(bs[True-nu.isnan(bs)])) else: r2= R**2.+z**2 r= math.sqrt(r2) dlvcdlr= dvcircdR(pot,r)/vcirc(pot,r)*r try: b= optimize.brentq(lambda x: dlvcdlr-(x/math.sqrt(r2+x**2.)-0.5*r2/(r2+x**2.)), 0.01,100.) except: #pragma: no cover b= nu.nan return b
def test_vec_to_sym_matrix(): # Check error if unsuitable size vec = np.ones(31) assert_raises_regex(ValueError, 'Vector of unsuitable shape', vec_to_sym_matrix, vec) # Check error if given diagonal shape incompatible with vec vec = np.ones(3) diagonal = np.zeros(4) assert_raises_regex(ValueError, 'incompatible with vector', vec_to_sym_matrix, vec, diagonal) # Check output value is correct vec = np.ones(6, ) sym = np.array([[sqrt(2), 1., 1.], [1., sqrt(2), 1.], [1., 1., sqrt(2)]]) assert_array_almost_equal(vec_to_sym_matrix(vec), sym) # Check output value is correct with seperate diagonal vec = np.ones(3, ) diagonal = np.ones(3) assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym) # Check vec_to_sym_matrix is the inverse function of sym_matrix_to_vec # when diagonal is included assert_array_almost_equal(vec_to_sym_matrix(sym_matrix_to_vec(sym)), sym) # when diagonal is discarded vec = sym_matrix_to_vec(sym, discard_diagonal=True) diagonal = np.diagonal(sym) / sqrt(2) assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)
def __init__(self, shape, degree): quadrature.Quadrature.__init__(self) if shape == "line": if degree <= 1: self._quadrature_data = [ quadrature.QPData(Point(0.0, index=0), 2.0) ] elif degree <= 3: self._quadrature_data = [ quadrature.QPData(Point(-sqrt(1/3), index=0), 1.0), quadrature.QPData(Point(sqrt(1/3), index=1), 1.0) ] elif degree <= 5: self._quadrature_data = [ quadrature.QPData(Point(-sqrt(3/5), index=0), 5/9), quadrature.QPData(Point(0.0, index=1), 8/9), quadrature.QPData(Point(sqrt(3/5), index=2), 5/9) ] else: raise Exception("Gauss quadrature not implemented for degree \"{0:d}\"".format(degree)) else: raise Exception("Gauss quadrature not implemented for shape \"{0:s}\"".format(shape))
def generate(data): m = random.choice([3, 1.4, 1.6, 1.8]) h = random.choice([4, 12, 14, 16]) d = 1.5*h g = 9.8 v0xmin = d*math.sqrt(g/(2*h)) v0x = round(random.choice([4, v0xmin*1.4, v0xmin*1.6, v0xmin*1.8]), 3) data["params"]["m"] = m data["params"]["h"] = h data["params"]["d"] = d data["params"]["v0x"] = v0x t = d/v0x data["params"]["t_c"] = round(t, 3) data["params"]["t_x1"] = round(math.sqrt(2*h/g), 3) data["params"]["t_x2"] = round(v0x*2/g, 3) v0y = 0.5*g*t - h/t data["params"]["vy_c"] = round(v0y, 2) data["params"]["vy_x1"] = round(-math.sqrt((g*t)**2 + v0x**2/2), 2) data["params"]["vy_x2"] = round( -0.5*g*t - h/2, 2) data["params"]["vy_x3"] = round(-math.sqrt(v0x**2 + v0y**2), 2) data["params"]["vy_x4"] = 0
def testStudentLogPDFMultidimensional(self): with self.test_session(): batch_size = 6 df = constant_op.constant([[1.5, 7.2]] * batch_size) mu = constant_op.constant([[3., -3.]] * batch_size) sigma = constant_op.constant([[-math.sqrt(10.), math.sqrt(15.)]] * batch_size) df_v = np.array([1.5, 7.2]) mu_v = np.array([3., -3.]) sigma_v = np.array([np.sqrt(10.), np.sqrt(15.)]) t = np.array([[-2.5, 2.5, 4., 0., -1., 2.]], dtype=np.float32).T student = student_t.StudentT(df, loc=mu, scale=sigma) log_pdf = student.log_prob(t) log_pdf_values = self.evaluate(log_pdf) self.assertEqual(log_pdf.get_shape(), (6, 2)) pdf = student.prob(t) pdf_values = self.evaluate(pdf) self.assertEqual(pdf.get_shape(), (6, 2)) if not stats: return expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v) expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v) self.assertAllClose(expected_log_pdf, log_pdf_values) self.assertAllClose(np.log(expected_pdf), log_pdf_values) self.assertAllClose(expected_pdf, pdf_values) self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def _beta_analysis1(stream=None): """private function called by beta_analysis()""" if stream is None: stream=sys.stdout q2 = [] for i in range(1,17): q_copy=copy.copy(self) q_copy.beta=2**(i/4.0) q_copy.dim=250 q_copy.grain=0.02 q_copy.recompute() q2.append(q_copy) na = num.array # shorthand t2 = na([q2i.mean() for q2i in q2]) p2 = na([q2i.pdf_at(t2i) for q2i,t2i in zip(q2,t2)]) sd2 = na([q2i.sd() for q2i in q2]) beta2 = na([q2i.beta for q2i in q2]) i=num.argsort(p2)[-1] t=t2[i] sd=q2[i].sd() p=num.sum(p2) betaMean=num.sum(p2*beta2)/p betaSd=math.sqrt(num.sum(p2*beta2**2)/p-(num.sum(p2*beta2)/p)**2) iBetaMean=num.sum(p2/beta2)/p iBetaSd=math.sqrt(num.sum(p2/beta2**2)/p-(num.sum(p2/beta2)/p)**2) stream.write('%5.2f %5.2f %4.1f %4.1f %6.3f\n'%(t,sd,1/iBetaMean,betaSd,self.gamma))
def __init__(self,n_hidden,n_input,n_out,fnc = 'sigmoid',loss_fnc= softmax,batchsize = 10,epochs = 1,learning_rate = 0.1,reg = 0.0,momentum = 0.0): self.nn = {} self.nn['batchsize'] = batchsize self.nn['epochs'] = epochs self.nn['learning_rate'] = learning_rate self.nn['reg'] = reg self.nn['momentum'] = momentum self.nn['loss_fnc'] = loss_fnc self.nn['w1'] = np.random.randn(n_hidden*n_input).reshape(n_input,n_hidden)/math.sqrt(n_hidden*n_input) self.nn['b1'] = np.zeros(n_hidden).reshape(n_hidden) self.nn['w2'] = np.random.random(n_hidden*n_out).reshape(n_hidden,n_out)/math.sqrt(n_hidden*n_out) self.nn['b2'] = np.zeros(n_out).reshape(n_out) self.nn['dw1'] = np.zeros_like(self.nn['w1']) self.nn['db1'] = np.zeros_like(self.nn['b1']) self.nn['dw2'] = np.zeros_like(self.nn['w2']) self.nn['db2'] = np.zeros_like(self.nn['b2']) self.nn['p_dw1'] = self.nn['dw1'] self.nn['p_db1'] = self.nn['db1'] self.nn['p_dw2'] = self.nn['dw2'] self.nn['p_db2'] = self.nn['db2']
def AzimuthalPointsSelected(self): xyinner = self.storedClickCoordinates[0] xyouter = self.storedClickCoordinates[1] xinner = xyinner[0] yinner = xyinner[1] xouter = xyouter[0] youter = xyouter[1] innerxDiff = xinner - self.parameters.centreX inneryDiff = yinner - self.parameters.centreY outerxDiff = xouter - self.parameters.centreX outeryDiff = youter - self.parameters.centreY innerCircleDistance = sqrt((innerxDiff * innerxDiff) + (inneryDiff * inneryDiff)) outerCircleDistance = sqrt((outerxDiff * outerxDiff) + (outeryDiff * outeryDiff)) innerOvalx1 = self.parameters.centreX - innerCircleDistance innerOvaly1 = self.parameters.centreY - innerCircleDistance innerOvalx2 = self.parameters.centreX + innerCircleDistance innerOvaly2 = self.parameters.centreY + innerCircleDistance outerOvalx1 = self.parameters.centreX - outerCircleDistance outerOvaly1 = self.parameters.centreY - outerCircleDistance outerOvalx2 = self.parameters.centreX + outerCircleDistance outerOvaly2 = self.parameters.centreY + outerCircleDistance Rad1 = sqrt(((xinner - self.parameters.centreX) * (xinner - self.parameters.centreX)) + ((yinner - self.parameters.centreY) * (yinner - self.parameters.centreY))) Rad2 = sqrt(((xouter - self.parameters.centreX) * (xouter - self.parameters.centreX)) + ((youter - self.parameters.centreY) * (youter - self.parameters.centreY))) self.parameters.outerCircleRadians = int(round(max(Rad1, Rad2))) self.parameters.innerCircleRadians = int(round(min(Rad1, Rad2))) self.AzimuthalIntegrationSetupWindow() self.clicksStoring = 2 self.ResetMainImageFrame() self.mainImageCanvas.create_oval(innerOvalx1, innerOvaly1, innerOvalx2, innerOvaly2, width = 3, outline = "red") self.mainImageCanvas.create_oval(outerOvalx1, outerOvaly1, outerOvalx2, outerOvaly2, width = 3, outline = "red")
def esfericaCoordenada(x, y, z):
    from math import atan, sqrt, pi
    r = sqrt(x**2 + y**2 + z**2)
    if z > 0:
        phi = atan(sqrt(x**2 + y**2)/z)
    elif z == 0:
        phi = pi/2
def relbreit(x, par):
    a = x[0]**2. - par[1]**2.
    b = par[1]*par[2]
    c = par[1]*math.sqrt(par[1]**2. + par[2]**2.)
    d = (2*math.sqrt(2)/math.pi)*(par[1]*par[2]*c)
    e = d/math.sqrt(par[1]**2. + c)
    return par[0]*e/(a**2. + b**2.)
def testStudentSampleMultiDimensional(self): with self.test_session(): batch_size = 7 df = constant_op.constant([[3., 7.]] * batch_size) mu = constant_op.constant([[3., -3.]] * batch_size) sigma = constant_op.constant([[math.sqrt(10.), math.sqrt(15.)]] * batch_size) df_v = [3., 7.] mu_v = [3., -3.] sigma_v = [np.sqrt(10.), np.sqrt(15.)] n = constant_op.constant(200000) student = student_t.StudentT(df=df, loc=mu, scale=sigma) samples = student.sample(n, seed=123456) sample_values = self.evaluate(samples) self.assertEqual(samples.get_shape(), (200000, batch_size, 2)) self.assertAllClose( sample_values[:, 0, 0].mean(), mu_v[0], rtol=1e-2, atol=0) self.assertAllClose( sample_values[:, 0, 0].var(), sigma_v[0]**2 * df_v[0] / (df_v[0] - 2), rtol=1e-1, atol=0) self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 0]) self.assertAllClose( sample_values[:, 0, 1].mean(), mu_v[1], rtol=1e-2, atol=0) self.assertAllClose( sample_values[:, 0, 1].var(), sigma_v[1]**2 * df_v[1] / (df_v[1] - 2), rtol=1e-1, atol=0) self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 1])
def avgQ(qx,qy,h): nx,ny = h.shape[0], h.shape[1] tx,ty = ida.tipcoord(h) ch=[] lp=0 Q = np.zeros(tx) for x in range(tx): vec = h[x,:] ch.append([x for i in vec if i>2.0]) if(np.size(ch[x])==ny): lp=x #ch = np.array(ch) for x in range(tx): for y in range(ny): if x <= lp: Q[x] += (math.sqrt((qx[x][y]**2.0+qy[x][y]**2.0)))/ny #Q[x] += p[x][y]/ny else: break for x in range(lp+1,tx): for y in range(ty-200,ty+200): if h[x][y] > 2.0: Q[x] += (math.sqrt((qx[x][y]**2.0+qy[x][y]**2.0)))#/np.size(ch[x]) # Q[x] += p[x][y]/np.size(ch[x]) #plt.plot(Q) #plt.show() return Q
def average_data(data):
    """ Find mean and std. deviation of data returned by ``simulate``. """
    numnodes = data['nodes']
    its = data['its']
    its_mean = numpy.average(its)
    its_std = math.sqrt(numpy.var(its))
    dead = data['dead']
    dead_mean = 100.0*numpy.average(dead)/numnodes
    dead_std = 100.0*math.sqrt(numpy.var(dead))/numnodes
    immune = data['immune']
    immune_mean = 100.0*numpy.average(immune)/numnodes
    immune_std = 100.0*math.sqrt(numpy.var(immune))/numnodes
    max_contam = data['max_contam']
    max_contam_mean = 100.0*numpy.average(max_contam)/numnodes
    max_contam_std = 100.0*math.sqrt(numpy.var(max_contam))/numnodes
    normal = data['normal']
    normal_mean = 100.0*numpy.average(normal)/numnodes
    normal_std = 100.0*math.sqrt(numpy.var(normal))/numnodes
    return {'its': (its_mean, its_std),
            'nodes': numnodes,
            'dead': (dead_mean, dead_std),
            'immune': (immune_mean, immune_std),
            'max_contam': (max_contam_mean, max_contam_std),
            'normal': (normal_mean, normal_std)}
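# Hedged usage sketch: the shape of `data` below is assumed from the keys read
# above (per-trial counts plus the node total), not taken from the original
# `simulate` implementation.
data = {'nodes': 100, 'its': [10, 12, 11], 'dead': [5, 7, 6],
        'immune': [20, 22, 21], 'max_contam': [50, 55, 52],
        'normal': [75, 71, 73]}
summary = average_data(data)
# e.g. summary['dead'] is (mean, std) of the dead-node percentage across trials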
J. Ruths and D. Ruths (2014). Control Profiles of Complex Networks. Science, 343(6177), 1373-1376. """ import math import matplotlib import matplotlib.pyplot as pyplot #from types import TupleType, ListType from zen import DiGraph from zen.control import profile as control_profile __all__ = ['profile_plot', 'profile_heatmap', 'profile_heatmap_weighted'] ## Constants ## _SQRT3OVER2 = math.sqrt(3) / 2. ## Helpers ## def _unzip(l): #return [x for (x,y) in l], [y for (x,y) in l] return zip(*l) def _normalize(xs): s = float(sum(xs)) return [x / s for x in xs] ## Boundary ##
pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) pd.set_option('display.width', None) pd.set_option('display.max_colwidth', -1) for filename in all_files: dfi = pd.read_csv(filename, index_col=None, header=0) dfi = dfi.sort_values('mse') selectedi = dfi.head(1) li.append(dfi) df = pd.concat(li, axis=0, ignore_index=True) if args.abc: dfa = df[df['formulas'].str.contains("_A")] dfb = dfa[dfa['formulas'].str.contains("_B")] df = dfb[dfb['formulas'].str.contains("_C")] df = df.sort_values('mse') selected = df.head(args.n) previousvalue = float("inf") for f in selected[["formulas", "mse"]].values: rmse = math.sqrt(f[1]) if rmse != previousvalue: print(f[0], rmse) previousvalue = rmse
def mathfun(q, q2, q3): xloccurrent = 0 yloccurrent = 0 zloccurrent = 0 xerrortot = 0 yerrortot = 0 zerrortot = 0 while True: # making sure that only the most recent addition to the queue is used loclist2 = q2.get(block=True) meanquat0m = loclist2[0] meanquatxm = loclist2[1] meanquatym = loclist2[2] meanquatzm = loclist2[3] stdquat0m = loclist2[4] stdquatxm = loclist2[5] stdquatym = loclist2[6] stdquatzm = loclist2[7] xvec = 1 - 2 * (meanquatym**2 + meanquatzm**2) yvec = 2 * (meanquatxm * meanquatym + meanquatzm * meanquat0m) zvec = 2 * (meanquatxm * meanquatzm - meanquatym * meanquat0m) xvecnorm = xvec * math.cos(headingoffset * pi / 180) - yvec * math.sin( headingoffset * pi / 180) yvecnorm = xvec * math.sin(headingoffset * pi / 180) + yvec * math.cos( headingoffset * pi / 180) # updates the x y and z locations based on trig using dead reckoning xloccurrent = xloccurrent + distanceperencoder * xvecnorm yloccurrent = yloccurrent + distanceperencoder * yvecnorm zloccurrent = zloccurrent + distanceperencoder * zvec print('Xloc={0:0.2F} Yloc={1:0.2F} Zloc={2:0.2F}'.format( xloccurrent, yloccurrent, zloccurrent)) xloclist.append(xloccurrent) yloclist.append(yloccurrent) zloclist.append(zloccurrent) # determines error according to error propagation formula. assuming worst # case error xerror = math.sqrt((4 * meanquatym * stdquatym)**2 + (4 * meanquatzm * stdquatzm)**2) yerror = math.sqrt((2 * meanquatym * stdquatxm)**2 + (2 * meanquatxm * stdquatym)**2 + (2 * meanquat0m * stdquatzm)**2 + (2 * meanquatzm * stdquat0m)**2) zerror = math.sqrt((2 * meanquatzm * stdquatxm)**2 + (2 * meanquatxm * stdquatzm)**2 + (2 * meanquat0m * stdquatym)**2 + (2 * meanquatym * stdquat0m)**2) #print(headingoffset) hosr = headingoffset * pi / 180 xnormerror = math.sqrt((math.cos(hosr) * xerror)**2 + (math.sin(hosr) * yerror)**2 + (xvec * math.sin(hosr) * delhosr)**2 + (yvec * math.cos(hosr) * delhosr)**2) ynormerror = math.sqrt((math.sin(hosr) * xerror)**2 + (math.cos(hosr) * yerror)**2 + (xvec * math.cos(hosr) * delhosr)**2 + (yvec * math.sin(hosr) * delhosr)**2) znormerror = zerror xdisterror = math.sqrt((xvecnorm * disterror)**2 + (distanceperencoder * xnormerror)**2) ydisterror = math.sqrt((yvecnorm * disterror)**2 + (distanceperencoder * ynormerror)**2) zdisterror = math.sqrt((zvec * disterror)**2 + (distanceperencoder * znormerror)**2) xerrortot = xerrortot + xdisterror yerrortot = yerrortot + ydisterror zerrortot = zerrortot + zdisterror xerrorlist.append(xerrortot) yerrorlist.append(yerrortot) zerrorlist.append(zerrortot) quat0listm.append(meanquat0m) quatxlistm.append(meanquatxm) quatylistm.append(meanquatym) quatzlistm.append(meanquatzm) stdquat0list.append(stdquat0m) stdquatxlist.append(stdquatxm) stdquatylist.append(stdquatym) stdquatzlist.append(stdquatzm) print('Xerror={0:0.2F} Yerror={1:0.2F} Zerror={2:0.2F}'.format( xerror, yerror, zerror)) # imputing information into the queue q.put([xloclist, yloclist, zloclist, xerrortot, yerrortot, zerrortot]) print(q3.full()) q3.put([ xloclist, yloclist, zloclist, xerrorlist, yerrorlist, zerrorlist, quat0listm, quatxlistm, quatylistm, quatzlistm, stdquat0list, stdquatxlist, stdquatylist, stdquatzlist, headingoffset ])
stopping[stopping.columns[index]] = history.history['val_loss'] train_loss[train_loss.columns[index]] = history.history['loss'] # Plotting all k of the value loss per epoch ax = stopping.plot(kind = 'line', legend = False, color = ['lightgrey']) ax.set_xlabel('Epoch') ax.set_ylabel('Validation loss') # The expected (average) loss for each number of epochs mus = stopping.mean(numeric_only = True, axis = 1) # The standard deviation for each number of epochs sds = stopping.std(axis = 1, skipna = True) # Gets standard deviation per columns ses = sds / math.sqrt(k-1) # Standard errors # Expected loss ax.plot(mus, color = 'black', linewidth = 2) # Confidence limits ax.plot(mus + 2*ses, color = 'black', linewidth = 1.2) ax.plot(mus - 2*ses, color = 'black', linewidth = 1.2) # Smallest loss mn = mus.idxmin() # Hastie and Tibshirani recommend taking the most conservative value (smallest number of epochs for us) # that is within one standard error of the minimum value upper = mus[mn] + ses[mn] lower = mus[mn] - ses[mn]
def __init__(self): rospy.init_node('nav_test', anonymous=True) rospy.on_shutdown(self.shutdown) self.voice = rospy.get_param("~voice", "voice_don_diphone") self.soundhandle = SoundClient() rospy.sleep(1) self.soundhandle.stopAll() rospy.sleep(1) self.soundhandle.say("Ready", self.voice) rospy.sleep(1) # Create sound client self.words=SoundClient() # Subscribe to the /recognizer/output topic to receive voice commands rospy.Subscriber('/recognizer/output', String, MicInputEventCallback) # Subscribe to the /mobile_base/events/digital_input topic to receive DIO rospy.Subscriber('/mobile_base/events/digital_input', DigitalInputEvent, DigitalInputEventCallback) # How long in seconds should the robot pause at each location? self.rest_time = rospy.get_param("~rest_time", 10) # Are we running in the fake simulator? self.fake_test = rospy.get_param("~fake_test", False) # Goal state return values goal_states = ['PENDING', 'ACTIVE', 'PREEMPTED', 'SUCCEEDED', 'ABORTED', 'REJECTED', 'PREEMPTING', 'RECALLING', 'RECALLED', 'LOST'] # Set up the goal locations. Poses are defined in the map frame. # An easy way to find the pose coordinates is to point-and-click # Nav Goals in RViz when running in the simulator. # Pose coordinates are then displayed in the terminal # that was used to launch RViz. locations = dict() locations['hall_1'] = Pose(Point(0.0, 1.0, 0.000), Quaternion(0.000, 0.000, 0.223, 1.000)) locations['hall_2'] = Pose(Point(0.0, -1.0, 0.000), Quaternion(0.000, 0.000, -0.670, 0.743)) #locations['hall_bedroom'] = Pose(Point(-3.719, 4.401, 0.000), Quaternion(0.000, 0.000, 0.733, 0.680)) #locations['living_room_1'] = Pose(Point(0.720, 2.229, 0.000), Quaternion(0.000, 0.000, 0.786, 0.618)) #locations['living_room_2'] = Pose(Point(1.471, 1.007, 0.000), Quaternion(0.000, 0.000, 0.480, 0.877)) #locations['dining_room_1'] = Pose(Point(-0.861, -0.019, 0.000), Quaternion(0.000, 0.000, 0.892, -0.451)) # Publisher to manually control the robot (e.g. to stop it) self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist) # Subscribe to the move_base action server self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction) rospy.loginfo("Waiting for move_base action server...") # Wait 60 seconds for the action server to become available self.move_base.wait_for_server(rospy.Duration(60)) rospy.loginfo("Connected to move base server") # A variable to hold the initial pose of the robot to be set by # the user in RViz initial_pose = PoseWithCovarianceStamped() # Variables to keep track of success rate, running time, # and distance traveled n_locations = len(locations) n_goals = 0 n_successes = 0 i = n_locations distance_traveled = 0 start_time = rospy.Time.now() running_time = 0 location = "" last_location = "" # Get the initial pose from the user rospy.loginfo("*** Click the 2D Pose Estimate button in RViz to set the robot's initial pose...") rospy.wait_for_message('initialpose', PoseWithCovarianceStamped) self.last_location = Pose() rospy.Subscriber('initialpose', PoseWithCovarianceStamped, self.update_initial_pose) # Make sure we have the initial pose while initial_pose.header.stamp == "": rospy.sleep(1) rospy.loginfo("Starting navigation test") # Begin the main loop and run through a sequence of locations while not rospy.is_shutdown(): # If we've gone through the current sequence, # start with a new random sequence if digitalS[2]==False: i=0 # Keep track of the distance traveled. # Use updated initial pose if available. 
if initial_pose.header.stamp == "": distance = sqrt(pow(locations[location].position.x - locations[last_location].position.x, 2) + pow(locations[location].position.y - locations[last_location].position.y, 2)) else: rospy.loginfo("Updating current pose.") distance = sqrt(pow(locations[location].position.x - initial_pose.pose.pose.position.x, 2) + pow(locations[location].position.y - initial_pose.pose.pose.position.y, 2)) initial_pose.header.stamp = "" # Store the last location for distance calculations last_location = location # Increment the counters i += 1 n_goals += 1 # Set up the next goal location self.goal = MoveBaseGoal() self.goal.target_pose.pose = locations[location] self.goal.target_pose.header.frame_id = 'map' self.goal.target_pose.header.stamp = rospy.Time.now() # Let the user know where the robot is going next rospy.loginfo("Going to: " + str(location)) # Start the robot toward the next location self.move_base.send_goal(self.goal) # Allow 5 minutes to get there finished_within_time = self.move_base.wait_for_result(rospy.Duration(300)) # Check for success or failure if not finished_within_time: self.move_base.cancel_goal() rospy.loginfo("Timed out achieving goal") else: state = self.move_base.get_state() if state == GoalStatus.SUCCEEDED: rospy.loginfo("Goal succeeded!") n_successes += 1 distance_traveled += distance rospy.loginfo("State:" + str(state)) else: rospy.loginfo("Goal failed with error code: " + str(goal_states[state])) # How long have we been running? running_time = rospy.Time.now() - start_time running_time = running_time.secs / 60.0 # Print a summary success/failure, distance traveled and time elapsed rospy.loginfo("Success so far: " + str(n_successes) + "/" + str(n_goals) + " = " + str(100 * n_successes/n_goals) + "%") rospy.loginfo("Running time: " + str(trunc(running_time, 1)) + " min Distance: " + str(trunc(distance_traveled, 1)) + " m") rospy.sleep(self.rest_time)
# computing the difference in the distances

# Use some functions and values from the math package
from math import sqrt, sin, cos, pi

# Location of orbiting point is (x,y)
# Location of fixed point is always (100, 0),
# AKA (p_x, p_y).  Change these as necessary.
p_x = 100
p_y = 0

# Radians in 10 degrees
radians = 10 * pi/180

# Precompute the cosine and sine of 10 degrees
COS10 = cos(radians)
SIN10 = sin(radians)

# Get starting point from user
x, y = eval(input("Enter initial satellite coordinates (x,y):"))

# Compute the initial distance
d1 = sqrt((p_x - x)*(p_x - x) + (p_y - y)*(p_y - y))

# Let the satellite orbit 10 degrees
x_old = x                  # Remember x's original value
x = x*COS10 - y*SIN10      # Compute new x value
# x's value has changed, but y's calculation depends on
# x's original value, so use x_old instead of x.
y = x_old*SIN10 + y*COS10

# Compute the new distance
d2 = sqrt((p_x - x)*(p_x - x) + (p_y - y)*(p_y - y))

# Print the difference in the distances
print("Difference in distances: %.3f" % (d2 - d1))
# This function accepts a Numpy arrays with N elements, and make a 1D array of M*N elements with random order. def randomize(x,M): Xdeck=np.repeat(x,M,axis=0) np.random.shuffle(Xdeck) np.random.shuffle(Xdeck) return Xdeck # I generally prefer to use the SFrame to read my data. In this example, I read a polarization catalog of radio galaxies with 533 rows and 51 columns. All=sf.SFrame.read_csv('/Users/Mehdi/Dropbox/SPASS/main/SPASS-NVSS-final.csv',skiprows=52) # I select the exact sub-sample of sources that I am interested and choose the parameters I want to run the KS and Spearman tests on. # num is number of simulations or bootstraped samples num=100000 # Making the target original sample from catalog. Here I perform multiple queries to choose my objects. sample=All[(All['W1snr'] >= 5) & (All['W2snr'] >= 5)& (All['W3snr'] >= 2)& ((All['W2er']**2+All['W3er']**2).apply(lambda x: sqrt(x)) < 0.4)] # Making Numpy arrays of the two quantities that going to be subject of the tests var1=sample['Alpha'].to_numpy() var2=sample['W1'].to_numpy()-sample['W2'].to_numpy() # Dividing the orginal observed sample into two subsamples based on the median value of parameter var1. #Note that the KS test will be performed on var2. but the Spearman measure the correlation coefficient between var1 and var2 med=np.median(var1) half1=var2[np.where(var1< med)] half2=var2[np.where(var1>= med)] ks_main=stats.mstats.ks_twosamp(half1,half2) print 'KS test ', ks_main rho_main, spvalue =stats.spearmanr(var1,var2) print 'Spearman rank correlation ', rho_main print 'Spearman p-value', spvalue
import math

##def is_sqr(x):
##    return math.sqrt(x) % 1 == 0

tmplist = filter(lambda x: math.sqrt(x) % 1 == 0, range(101))
newlist = list(tmplist)
print(newlist)

print(13.45 % 1)     # modulo 1 on a decimal keeps the fractional part
print(1345 % 1)      # modulo 1 on an integer
print(1345.0 % 1)    # modulo 1 on a float
def __init__(self, input_size, hidden_size, num_layers=1, has_bias=True, batch_first=False, dropout=0, bidirectional=False): super(LSTM, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.has_bias = has_bias self.batch_first = validator.check_value_type("batch_first", batch_first, [bool], self.cls_name) self.hidden_size = validator.check_integer("hidden_size", hidden_size, 0, Rel.GT, self.cls_name) self.num_layers = validator.check_integer("num_layers", num_layers, 0, Rel.GT, self.cls_name) self.dropout = float(dropout) self.bidirectional = bidirectional if self.batch_first: self.transpose1 = P.Transpose() self.transpose2 = P.Transpose() num_directions = 2 if self.bidirectional else 1 self.cpu_target = False enable_debug = context.get_context("enable_debug_runtime") if context.get_context("device_target") == "CPU" and not enable_debug: self.cpu_target = True if not self.cpu_target: self.lstm = P.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.num_layers, has_bias=self.has_bias, bidirectional=self.bidirectional, dropout=self.dropout) weight_size = 0 gate_size = 4 * self.hidden_size for layer in range(self.num_layers): input_layer_size = self.input_size if layer == 0 else self.hidden_size * num_directions increment_size = gate_size * input_layer_size increment_size += gate_size * self.hidden_size if self.has_bias: increment_size += 2 * gate_size weight_size += increment_size * num_directions stdv = 1 / math.sqrt(hidden_size) w_np = np.random.uniform(-stdv, stdv, (weight_size, 1, 1)).astype(np.float32) self.weight = Parameter(initializer(Tensor(w_np), [weight_size, 1, 1]), name='weight') else: input_size_list = [] input_size_list.append(self.input_size) for i in range(self.num_layers - 1): input_size_list.append(self.hidden_size * num_directions) weights = [] layers = [] bias_size = 0 if not self.has_bias else num_directions * self.hidden_size * 4 stdv = 1 / math.sqrt(hidden_size) for i in range(num_layers): weight_size = (input_size_list[i] + self.hidden_size) * num_directions * self.hidden_size * 4 if has_bias: weight_size = weight_size + bias_size w_np = np.random.uniform(-stdv, stdv, (weight_size, 1, 1)).astype(np.float32) weights.append(Parameter(initializer(Tensor(w_np), w_np.shape), name='weight' + str(i))) layers.append(nn.LSTMCell(input_size=input_size_list[i], hidden_size=self.hidden_size, has_bias=self.has_bias, bidirectional=self.bidirectional, dropout=self.dropout)) self.lstms = layers self.weight = ParameterTuple(tuple(weights)) self.fill = P.Fill() self.shape = P.Shape()
j = 0 while j < len(pol_vec_n): pol_vec_n_r = np.around(pol_vec_n[j] * 1000000.) / 1000000. pol_vec_new.append(pol_vec_n_r) j = j + 1 x_vec_new = pol_vec_new[0] y_vec_new = pol_vec_new[1] z_vec_new = pol_vec_new[2] if y_vec_new >= 0. and x_vec_new >= 0.: theta_pol_global = (180. / math.pi) * math.acos( z_vec_new / math.sqrt(x_vec_new**2. + y_vec_new**2. + z_vec_new**2.)) phi_pol_global = (180. / math.pi) * math.asin( y_vec_new / math.sqrt(x_vec_new**2. + y_vec_new**2.)) if y_vec_new >= 0. and x_vec_new < 0.: theta_pol_global = (180. / math.pi) * math.acos( z_vec_new / math.sqrt(x_vec_new**2. + y_vec_new**2. + z_vec_new**2.)) phi_pol_global = 180. - (180. / math.pi) * math.asin( y_vec_new / math.sqrt(x_vec_new**2. + y_vec_new**2.)) if y_vec_new < 0. and x_vec_new == 0.: theta_pol_global = (180. / math.pi) * math.acos( z_vec_new / math.sqrt(x_vec_new**2. + y_vec_new**2. + z_vec_new**2.)) phi_pol_global = 360 + (180. / math.pi) * math.asin( y_vec_new / math.sqrt(x_vec_new**2. + y_vec_new**2.))
def step(self, params, state): ps, qs, bits_sent, messages_sent, iteration_number, rngs = state my_rank = torch.distributed.get_rank() params = [param.data.clone() for param in params] for iteration in range(iteration_number, iteration_number + self.num_iterations): if iteration == 0: # there has been no gradient update yet, so no disagreement between the neighbors break # Switch between left and right matrix multiplications if iteration % 2 == 1: ps, qs = qs, ps p_and_q_are_swapped = True transpose_if_even = lambda m: m else: p_and_q_are_swapped = False transpose_if_even = lambda m: m.t() if not self.warm_start: for neighbor in self.topology.neighbor_ranks(my_rank): self.fill_with_random_values(ps[neighbor]["buffer"], rngs[neighbor]) request_handles = [] for neighbor in self.topology.neighbor_ranks(my_rank): # Do a local matrix multiplication for tensor, p, q in zip(params, ps[neighbor]["list"], qs[neighbor]["list"]): if self.round_weights: assert p.shape[1] == 1 p[:] = p[:].sign() / sqrt(p.shape[0]) else: orthogonalize(p) matrix = tensor.view(tensor.shape[0], -1) torch.matmul(transpose_if_even(matrix), p, out=q[:]) # Send the flattened vector with results to the neighbors handle = isend(qs[neighbor]["buffer"], neighbor) bits_sent += num_bits(qs[neighbor]["buffer"]) messages_sent += 1 request_handles.append(handle) any_neighbor = self.topology.neighbor_ranks(my_rank)[0] recv_buffer = torch.empty_like(qs[any_neighbor]["buffer"]) for handle, neighbor in zip(request_handles, self.topology.neighbor_ranks(my_rank)): # Recieve their results recv(recv_buffer, neighbor) handle.wait() # Store the outcome of the matrix multiplication (x_i - x_j)p, where i > j if my_rank > neighbor: qs[neighbor]["buffer"].sub_(recv_buffer) else: qs[neighbor]["buffer"][:] = recv_buffer - qs[neighbor]["buffer"] if p_and_q_are_swapped: # Swap back ps, qs = qs, ps for neighbor in self.topology.neighbor_ranks(my_rank): weight = self.topology.weight(my_rank, neighbor) for tensor, p, q in zip(params, ps[neighbor]["list"], qs[neighbor]["list"]): sign = -1 if my_rank > neighbor else 1 tensor.data.add_( sign * weight * self.diffusion_rate, (p @ q.t()).view(*tensor.shape) ) return ( params, self.State( ps, qs, bits_sent, messages_sent, iteration_number + self.num_iterations, rngs ), )
def learn_from_instance(self, X, y, weight, hat, parent, parent_branch): true_class = y class_prediction = 0 leaf = self.filter_instance_to_leaf(X, parent, parent_branch) if leaf.node is not None: class_prediction = get_max_value_key(leaf.node.get_class_votes(X, hat)) bl_correct = (true_class == class_prediction) if self._estimation_error_weight is None: self._estimation_error_weight = ADWIN() old_error = self.get_error_estimation() # Add element to ADWIN add = 0.0 if (bl_correct is True) else 1.0 self._estimation_error_weight.add_element(add) # Detect change with ADWIN self.error_change = self._estimation_error_weight.detected_change() if self.error_change is True and old_error > self.get_error_estimation(): self.error_change = False # Check condition to build a new alternate tree if self.error_change is True: self._alternate_tree = hat._new_learning_node() hat.alternate_trees_cnt += 1 # Condition to replace alternate tree elif self._alternate_tree is not None and self._alternate_tree.is_null_error() is False: if self.get_error_width() > error_width_threshold \ and self._alternate_tree.get_error_width() > error_width_threshold: old_error_rate = self.get_error_estimation() alt_error_rate = self._alternate_tree.get_error_estimation() fDelta = .05 fN = 1.0 / self._alternate_tree.get_error_width() + 1.0 / (self.get_error_width()) bound = math.sqrt(2.0 * old_error_rate * (1.0 - old_error_rate) * math.log(2.0 / fDelta) * fN) # To check, bound never less than (old_error_rate - alt_error_rate) if bound < (old_error_rate - alt_error_rate): hat._active_leaf_node_cnt -= self.number_leaves() hat._active_leaf_node_cnt += self._alternate_tree.number_leaves() self.kill_tree_children(hat) if parent is not None: parent.set_child(parent_branch, self._alternate_tree) else: # Switch tree root hat._tree_root = hat._tree_root.alternateTree hat.switch_alternate_trees_cnt += 1 elif bound < alt_error_rate - old_error_rate: if isinstance(self._alternate_tree, HAT.ActiveLearningNode): self._alternate_tree = None elif isinstance(self._alternate_tree, HAT.InactiveLearningNode): self._alternate_tree = None else: self._alternate_tree.kill_tree_children(hat) hat.pruned_alternate_trees_cnt += 1 # hat.pruned_alternate_trees_cnt to check # Learn_From_Instance alternate Tree and Child nodes if self._alternate_tree is not None: self._alternate_tree.learn_from_instance(X, y, weight, hat, parent, parent_branch) child_branch = self.instance_child_index(X) child = self.get_child(child_branch) if child is not None: child.learn_from_instance(X, y, weight, hat, parent, parent_branch)
def collides_circle(self, circle):
    dx = self.center[0, 0] - circle.center[0, 0]
    dy = self.center[1, 0] - circle.center[1, 0]
    dist = sqrt(dx * dx + dy * dy)
    return dist < (self.radius + circle.radius)
def mag(self):
    ''' get the magnitude of the vector. '''
    return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)
epsR3= concrHA30.getShrEpscs(14,tS,90,600)
err2= err2+(epsR3+28e-6)**2
epsR4= concrHA30.getShrEpscs(10000,tS,90,600)
err2= err2+(epsR4+149e-6)**2

# Check against the values in table 39.7.d of the standard.
epsR1d=concrHA70.getShrEpscs(14,tS,50,50)
err2= err2+(epsR1d+178e-6)**2
epsR2d=concrHA70.getShrEpscs(10000,tS,50,50)
err2= err2+(epsR2d+448e-6)**2
epsR3d=concrHA70.getShrEpscs(14,tS,90,600)
err2= err2+(epsR3d+80e-6)**2
epsR4d=concrHA70.getShrEpscs(10000,tS,90,600)
err2= err2+(epsR4d+211e-6)**2

ratio1= math.sqrt(err2)

'''
print("epsR1= ",epsR1*1e6,"x10^(-6)\n")
print("epsR2= ",epsR2*1e6,"x10^(-6)\n")
print("epsR3= ",epsR3*1e6,"x10^(-6)\n")
print("epsR4= ",epsR4*1e6,"x10^(-6)\n")
print("epsR1d= ",epsR1d*1e6,"x10^(-6)\n")
print("epsR2d= ",epsR2d*1e6,"x10^(-6)\n")
print("epsR3d= ",epsR3d*1e6,"x10^(-6)\n")
print("epsR4d= ",epsR4d*1e6,"x10^(-6)\n")
print("ratio1= ",(ratio1))
'''

import os
from misc_utils import log_messages as lmsg
def Xanes2Min(params, x, data, input, config, output): from .controls import generateAndRunWorkflow import copy Xanes2Min.count #lastParams Xanes2Min.count += 1 input2 = copy.deepcopy(input) energy_shift = 0.0 atoms = input['cluster'] amp = 1.0 input2['feff.corrections'] = [[0.0, 0.0]] # Set controls based on what has changed since last call # to function. if Xanes2Min.count == 1: control = [1, 1, 1, 1, 1, 1] else: control = [0, 0, 0, 0, 0, 0] ipar = 0 for param in list(params.values()): if Xanes2Min.lastParams is not None: diff = param != list(Xanes2Min.lastParams.values())[ipar] else: diff = True # Use case insensitive equal. if param.name.lower() == 'expansion': # Uniform expansion of coordinates in cluster. if diff: control = [1, 1, 1, 1, 1, 1] expansion = param.value atoms = [[ f[0], expansion * f[1], expansion * f[2], expansion * f[3] ] for f in atoms] elif param.name.lower() == 'broadening': # Lorentzian broadening applied to spectrum. if diff: control[5] = 1 broadening = param.value #input2['spectral_broadening'] = [[broadening]] input2['feff.corrections'][0][1] = broadening elif param.name.lower() == 'delta_e0': # Shift in absolute edge energy (shift of energy grid of spectrum). energy_shift = param.value elif param.name.lower() == 'bond': # Move a set of atoms away from absorber along a bond. # Find vector to move along (r_2 - r_1)/r12 # Get the two atoms defining the bond vector. if diff: control = [1, 1, 1, 1, 1, 1] bond = param.value bond_atoms = [ item - 1 for sublist in input2['fit.bond'] for item in sublist ] vec = [ input2['cluster'][bond_atoms[1]][i] - input2['cluster'][bond_atoms[0]][i] for i in [1, 2, 3] ] vecSquared = [vec[i]**2 for i in [0, 1, 2]] norm = math.sqrt(sum(vecSquared)) vec = [vec[i] / norm * bond for i in [0, 1, 2]] for atom in bond_atoms[1:]: for i in [1, 2, 3]: atoms[atom][i] += vec[i - 1] elif param.name.lower() == 'delta_efermi': #input2['fermi_shift'] = [[param.value]] input2['feff.corrections'][0][0] = param.value if diff: control[5] = 1 elif param.name.lower() == 'amplitude': amp = param.value else: print(('WARNING: UNKOWN PARAMETER ' + param.name + '!')) print('STOPPING NOW!!!') exit() ipar += 1 input2['cluster'] = atoms input2['feff.control'] = [control] # Need a copy of config to start wf over config2 = copy.deepcopy(config) # Set current working directory to xCDir, so that internal wf # will run inside of outer wf directory. config2['cwd'] = config['xcDir'] if False: # Save all runs of underlying handler in separate directories. config2['xcIndexStart'] = Xanes2Min.count else: config2['xcIndexStart'] = 1 dir = config['xcDir'] # Loop over targets in output. Not sure if there will ever be more than one output target here. # Set output and error files for target in output: with open(os.path.join(dir, 'corvus.fit.stdout'), 'w') as out, open(os.path.join(dir, 'corvus.fit.stderr'), 'w') as err: # Set the tagetList according to fit.target #xanes2MinIterator += 1 targetList = input['fit.target'] # generate and run the workflow for target, unless no run is necessary. generateAndRunWorkflow(config2, input2, targetList) x0, y = np.array(input2[input['fit.target'][0][0]]) y = y * amp # If there is an energy shift, shift the x-axis before # interpolating onto data grid x0 = x0 + energy_shift # On first call, check if experimental data is outside calculated # data, and redefine experimental data within range. 
global firstcall global fitconvfile if firstcall: print('Opening convergence file') try: os.remove('fitconvergence.dat') except OSError: pass fitconvfile = open('fitconvergence.dat', 'a') np.savetxt(fitconvfile, np.array([x, data]).transpose()) fitconvfile.write('\n') firstcall = False yterp = np.interp(x, x0, y, left=0.0, right=0.0) np.savetxt(fitconvfile, np.array([x, yterp]).transpose()) fitconvfile.write('\n') i = 0 residual = np.zeros(yterp.size) for yi in yterp: if (x[i] >= x0[0]) and (x[i] <= x0[-1]): residual[i] = yi - data[i] else: residual[i] = 0.0 i = i + 1 Xanes2Min.lastParams = copy.copy(params) return residual
def Ecell(): Ai = float(entrada2.get()) Cc = float(entrada3.get()) Cd = float(entrada4.get()) N = int(entrada1.get()) gnac = math.exp((-0.5*z*math.sqrt(Cc))/(1+(ANa/305)*math.sqrt(Cc))) gnad = math.exp((-0.5*z*math.sqrt(Cd))/(1+(ANa/305)*math.sqrt(Cd))) gclc = math.exp((-0.5*z*math.sqrt(Cc))/(1+(ACl/305)*math.sqrt(Cc))) gcld = math.exp((-0.5*z*math.sqrt(Cd))/(1+(ACl/305)*math.sqrt(Cd))) acem = math.log((gnac*Cc)/(gnad*Cd)) aaem = math.log((gclc*Cc)/(gcld*Cd)) #Calculo de la resistencia fo = 1.8 Rl = fo*(1/0.7)*(Es/Ai) Rh = fo*(1/5.5)*(Es/Ai) r = Raem+Rcem+Rh+Rl Rel = 0.54 Ri = N*r+Rel #calculo de voltaje Ecem = Pcem*((R*T)/(z*CF))*acem Eaem = Paem*((R*T)/(z*CF))*aaem Ecell = N*(Ecem+Eaem) # intensidad i = Ri+Re I = Ecell/i #corriente electrica #Potencia Pgross = (I**2)*Re Pd = Pgross/(N*Ai) #Voltaje Ec = Pgross/I df = pd.DataFrame({'Ri (ohms)': Ri,'Re (ohms)':Re,'Potencia (W)':Pgross, 'DPotencia (W/m^2)':Pd,'CE (A)':I,'Voltaje (V)':Ec}) #Graficas fig = plt.figure(figsize=(15,15)) fig.tight_layout() ax1 = fig.add_subplot(1,3,1) ax2 = fig.add_subplot(1,3,2) ax3 = fig.add_subplot(1,3,3) ax1.plot(I,Pgross,'ro') ax2.plot(I,Ec,marker='*') ax3.plot(I,Pd,'g+') ax1.set_xlabel('Intensidad') ax1.set_ylabel('Potencia') ax2.set_xlabel('Intensidad') ax2.set_ylabel('Voltaje') ax3.set_xlabel('Intensidad') ax3.set_ylabel('potencia/Área') ax1.set_title('Potencial maximo') ax2.set_title('Intensidad vs Voltaje') ax1.grid() ax2.grid() ax3.grid() plt.show() return var.set(Ecell)
def move_to_target_fire(self, x, y, size, attitude, location):
    # exit immediately if we are not controlling the vehicle
    if not self.controlling_vehicle:
        return

    # get active command
    active_command = self.vehicle.commands.next

    # get current time
    now = time.time()

    # exit immediately if it's been too soon since the last update
    if (now - self.guided_last_update) < self.vel_update_rate:
        return

    # if we have a new target position recalculate velocity vector
    if (self.fire_found):
        #x,y = target_info[1]
        #pitch_dir,yaw_dir = balloon_finder.pixels_to_direction(x, y, attitude.roll, attitude.pitch, attitude.yaw)
        #yaw_dir = yaw_dir % 360
        #print "Target Yaw:%f" %(yaw_dir)
        #target_distance = target_info[2]
        #print "Target Distance:%f" %(target_distance*100)

        # shift origin to center of image
        x, y = shift_to_origin((x, y), self.camera_width, self.camera_height)
        hfov = self.camera_hfov
        vfov = self.camera_vfov

        # stabilize image with vehicle attitude
        x -= (self.camera_width / hfov) * math.degrees(attitude.roll)
        y += (self.camera_height / vfov) * math.degrees(attitude.pitch)

        # convert to distance
        X, Y = self.pixel_point_to_position_xy((x, y), location.z)

        # convert to world coordinates
        target_headings = math.atan2(Y, X)  #% (2*math.pi)
        target_heading = (attitude.yaw - target_headings)
        target_distance = math.sqrt(X**2 + Y**2)
        #sc_logger.text(sc_logger.GENERAL, "Distance to target: {0}".format(round(target_distance,2)))

        # calculate speed toward target
        speed = target_distance * self.dist_to_vel
        # apply max speed limit
        speed = min(speed, self.vel_speed_max)

        # calculate cartesian speed
        vx = speed * math.sin(target_heading) * -1.0
        vy = speed * math.cos(target_heading)  #*-1.0
        print "Found Target go to vx:%f vy:%f Alt:%f target_distance:%f headings:%f heading:%f " % (vx, vy, location.z, target_distance, target_headings, target_heading)

        # only descend when on top of target
        if (location.z > 3.5):
            vz = 0.25
        else:
            vz = 0

        if active_command == 3:  # PICK MP1
            # if altitude is already below 4 meters, turn the magnet on
            #self.servo(0)  # camera ready for pick
            if (location.z < 4.0):
                if (location.z > 1.5):
                    vz = 0.15
                else:
                    vz = 0
                    if (self.c == True):
                        self.relay2(1)  # gripper ON
                        self.waktu = 0
                        self.c = False
                        print ("Payload picked up, continuing mission")
                        sc_logger.text(sc_logger.GENERAL, "Payload picked up, continuing mission")
                        self.controlling_vehicle = False
                        self.vehicle.mode = VehicleMode("AUTO")
                        self.vehicle.flush()
        # if active_command = 3:
        #     if

        if active_command == 4:  # DROP MP1
            #self.servo(1)  # camera ready for drop
            if (location.z > 2.0):
                vz = 0.25
            else:
                if (location.z > 1.6):  # reduce descent speed
                    vz = 0.2
                else:
                    vz = 0
                    if (self.c == True):
                        self.relay2(0)  # gripper OFF
                        self.waktu = 0
                        self.c = False
                        self.drop = 1
                        print ("Payload dropped, continuing mission")
                        sc_logger.text(sc_logger.GENERAL, "Payload dropped, continuing mission")
                        self.vehicle.commands.next = 5
                        self.controlling_vehicle = False
                        self.vehicle.mode = VehicleMode("AUTO")
                        self.vehicle.flush()

        if active_command == 5:  # DROP MP1 or MP2
            #self.servo(1)  # camera ready for drop
            if (location.z > 2.0):
                vz = 0.25
            else:
                if (location.z > 1.6):  # reduce descent speed
                    vz = 0.2
                else:
                    vz = 0
                    if (self.c == True):
                        self.relay2(0)  # gripper OFF
                        self.waktu = 0
                        self.c = False
                        self.drop = 2
                        print ("Payload dropped, continuing mission")
                        sc_logger.text(sc_logger.GENERAL, "Payload dropped, continuing mission")
                        self.controlling_vehicle = False
                        self.vehicle.mode = VehicleMode("AUTO")
                        self.vehicle.flush()

        if active_command == 6:  # DROP LOG
            #self.servo(1)  # camera ready for drop
            if (location.z > 2.5):
                vz = 0.25
            else:
                if (location.z > 2.0):  # reduce descent speed
                    vz = 0.15
                else:
                    vz = 0
                    if (self.c == True):
                        self.relay(1)  # release payload
                        self.waktu = 0
                        self.c = False
                        print ("Payload dropped, continuing mission")
                        sc_logger.text(sc_logger.GENERAL, "Payload dropped, continuing mission")
                        self.controlling_vehicle = False
                        self.vehicle.mode = VehicleMode("AUTO")
                        self.vehicle.flush()

        if active_command == 7:  # PICK MP1
            # if altitude is already below 4 meters, turn the magnet on
            #self.servo(0)  # camera ready for pick
            if (location.z < 4.0):
                if (location.z > 1.4):
                    vz = 0.15
                else:
                    vz = 0
                    # activate gripper
                    if (self.c == True):
                        self.relay2(1)  # magnet ON
                        self.waktu = 0
                        self.c = False
                        print ("Payload picked up, continuing mission")
                        sc_logger.text(sc_logger.GENERAL, "Payload picked up, continuing mission")
                        self.controlling_vehicle = False
                        if (self.drop == 2):
                            self.vehicle.commands.next = 8
                            self.lanjut_cmd += 1
                        self.vehicle.mode = VehicleMode("AUTO")
                        self.vehicle.flush()

        if active_command == 9:  # DROP MP2
            #self.servo(1)  # camera ready for drop
            if (location.z > 2.0):
                vz = 0.25
            else:
                if (location.z > 1.6):  # reduce descent speed
                    vz = 0.2
                else:
                    vz = 0
                    if (self.c == True):
                        self.relay2(0)  # magnet OFF
                        self.waktu = 0
                        self.c = False
                        print ("Payload dropped, continuing mission")
                        sc_logger.text(sc_logger.GENERAL, "Payload dropped, continuing mission")
                        self.controlling_vehicle = False
                        self.vehicle.mode = VehicleMode("AUTO")
                        self.vehicle.flush()

        # send velocity commands toward target heading
        self.send_nav_velocity(vx, vy, vz)
def distance(x1, y1, x2, y2):
    dist = math.sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2))
    return dist
def display_img(img, title_img):
    # img is assumed to hold a flattened square image; reshape it back to 2-D for display
    plt.title(title_img)
    plt.imshow(img.reshape(int(math.sqrt(img.shape[1])), -1), cmap='gray')
    plt.show()
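# Hedged usage sketch (assumed shapes, not from the original source): display_img
# expects a 2-D array whose columns form one flattened square image, e.g. a
# (1, 784) row standing in for a 28x28 MNIST digit.
import math
import numpy as np
import matplotlib.pyplot as plt

row = np.random.rand(1, 784)            # stand-in for one flattened 28x28 image
display_img(row, "random 28x28 image")  # reshapes to (28, 28) and shows it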
def get_min_area(light):
    # clamp the light reading to the supported range before scaling
    if light > 10:
        light = 10
    if light < 1:
        light = 1
    area = int((1000 * math.sqrt(light - 1)) + 100)
    print("min area= " + str(area))
    return area
def dis(num_x1, num_y1, num_x2, num_y2):
    return math.sqrt((num_x1 - num_x2)**2 + (num_y1 - num_y2)**2)
def step(self, closure=None):
    """Performs a single optimization step.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        loss = closure()

    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError(
                    'AdaBelief does not support sparse gradients, please consider SparseAdam instead')

            amsgrad = group['amsgrad']
            state = self.state[p]
            beta1, beta2 = group['betas']

            # State initialization
            if len(state) == 0:
                state['rho_inf'] = 2.0 / (1.0 - beta2) - 1.0
                state['step'] = 0
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(
                    p.data, memory_format=torch.preserve_format
                ) if version_higher else torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['exp_avg_var'] = torch.zeros_like(
                    p.data, memory_format=torch.preserve_format
                ) if version_higher else torch.zeros_like(p.data)
                if amsgrad:
                    # Maintains max of all exp. moving avg. of sq. grad. values
                    state['max_exp_avg_var'] = torch.zeros_like(
                        p.data, memory_format=torch.preserve_format
                    ) if version_higher else torch.zeros_like(p.data)

            # get current state variable
            exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']

            state['step'] += 1
            bias_correction1 = 1 - beta1 ** state['step']
            bias_correction2 = 1 - beta2 ** state['step']

            # perform weight decay, check if decoupled weight decay
            if self.weight_decouple:
                if not self.fixed_decay:
                    p.data.mul_(1.0 - group['lr'] * group['weight_decay'])
                else:
                    p.data.mul_(1.0 - group['weight_decay'])
            else:
                if group['weight_decay'] != 0:
                    grad.add_(group['weight_decay'], p.data)

            # Update first and second moment running average
            exp_avg.mul_(beta1).add_(1 - beta1, grad)
            grad_residual = grad - exp_avg
            exp_avg_var.mul_(beta2).addcmul_(1 - beta2, grad_residual, grad_residual)

            if amsgrad:
                max_exp_avg_var = state['max_exp_avg_var']
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.max(max_exp_avg_var, exp_avg_var, out=max_exp_avg_var)
                # Use the max. for normalizing running avg. of gradient
                denom = (max_exp_avg_var.add_(group['eps']).sqrt() /
                         math.sqrt(bias_correction2)).add_(group['eps'])
            else:
                denom = (exp_avg_var.add_(group['eps']).sqrt() /
                         math.sqrt(bias_correction2)).add_(group['eps'])

            if not self.rectify:
                # Default update
                step_size = group['lr'] / bias_correction1
                p.data.addcdiv_(-step_size, exp_avg, denom)
            else:
                # Rectified update
                # calculate rho_t
                state['rho_t'] = state['rho_inf'] - 2 * state['step'] * beta2 ** state['step'] / (
                    1.0 - beta2 ** state['step'])

                if state['rho_t'] > 4:
                    # perform Adam style update if variance is small
                    rho_inf, rho_t = state['rho_inf'], state['rho_t']
                    rt = (rho_t - 4.0) * (rho_t - 2.0) * rho_inf / (rho_inf - 4.0) / (rho_inf - 2.0) / rho_t
                    rt = math.sqrt(rt)
                    step_size = rt * group['lr'] / bias_correction1
                    p.data.addcdiv_(-step_size, exp_avg, denom)
                else:
                    # perform SGD style update
                    p.data.add_(-group['lr'], exp_avg)

    return loss
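# Hedged usage sketch (assumption: this step() belongs to a torch.optim.Optimizer
# subclass named AdaBelief with an Adam-style constructor, and its module defines a
# version_higher flag; names and hyperparameters below are illustrative only).
import torch
import torch.nn as nn

version_higher = torch.__version__ >= "1.5.0"  # flag the step() above checks

model = nn.Linear(10, 1)
optimizer = AdaBelief(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
                      eps=1e-8, weight_decay=0, amsgrad=False)

x, y = torch.randn(32, 10), torch.randn(32, 1)
optimizer.zero_grad()
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()  # runs the update above once per parameter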
def get_norm(self, vector):
    squared_sum = 0
    for i in range(self.n):
        squared_sum += vector[i] * vector[i]
    return math.sqrt(squared_sum)
def is_prime(x):
    # numbers below 2 are not prime
    if x < 2:
        return False
    for i in range(2, floor(sqrt(x)) + 1):
        if x % i == 0:
            return False
    return True
def get_Goldschmidt_TF(row):
    # Goldschmidt tolerance factor: t = (r_A + r_O) / (sqrt(2) * (r_B + r_O))
    return (row.rA_avg + row.rO) / (math.sqrt(2) * (row.rB_avg + row.rO))
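# Hedged usage sketch (assumed column names rA_avg, rB_avg and rO, matching the
# attributes the function reads; the ionic radii below are placeholder values).
import math
import pandas as pd

perovskites = pd.DataFrame({'rA_avg': [1.44], 'rB_avg': [0.605], 'rO': [1.40]})
perovskites['t_factor'] = perovskites.apply(get_Goldschmidt_TF, axis=1)
print(perovskites)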
def tassellate(ob0, ob1, offset, zscale, gen_modifiers, com_modifiers, mode,
               scale_mode, randomize, rand_seed, fill_mode):
    random.seed(rand_seed)
    if gen_modifiers:
        me0 = ob0.to_mesh(bpy.context.scene, apply_modifiers=True, settings='PREVIEW')
    else:
        me0 = ob0.data
    if com_modifiers:
        me1 = ob1.to_mesh(bpy.context.scene, apply_modifiers=True, settings='PREVIEW')
    else:
        me1 = ob1.data

    verts0 = me0.vertices
    n_verts = len(me1.vertices)
    n_edges = len(me1.edges)
    n_faces = len(me1.polygons)
    loc = ob1.location
    dim = ob1.dimensions
    scale = ob1.scale

    new_verts = []
    new_edges = []
    new_faces = []
    new_verts_np = np.array(())

    # bounding box of the component object in world space
    min = Vector((0, 0, 0))
    max = Vector((0, 0, 0))
    first = True
    for v in me1.vertices:
        vert = (ob1.matrix_world * v.co)
        if vert[0] < min[0] or first:
            min[0] = vert[0]
        if vert[1] < min[1] or first:
            min[1] = vert[1]
        if vert[2] < min[2] or first:
            min[2] = vert[2]
        if vert[0] > max[0] or first:
            max[0] = vert[0]
        if vert[1] > max[1] or first:
            max[1] = vert[1]
        if vert[2] > max[2] or first:
            max[2] = vert[2]
        first = False
    bb = max - min

    verts1 = []
    for v in me1.vertices:
        if mode == "ADAPTIVE":
            vert = (ob1.matrix_world * v.co) - min
            vert[0] = vert[0] / bb[0]
            vert[1] = vert[1] / bb[1]
            vert[2] = (vert[2] + (-0.5 + offset * 0.5) * bb[2]) * zscale
        else:
            vert = v.co
            vert[2] *= zscale
        verts1.append(vert)

    # component vertices
    vs1 = np.array([v for v in verts1]).reshape(len(verts1), 3, 1)
    vx = vs1[:, 0]
    vy = vs1[:, 1]
    vz = vs1[:, 2]

    # component polygons
    fs1 = [[i for i in p.vertices] for p in me1.polygons]
    new_faces = fs1[:]

    j = 0

    if fill_mode == 'FAN':
        fan_verts = [v.co.to_tuple() for v in me0.vertices]
        fan_polygons = []
        for p in me0.polygons:
            fan_center = Vector((0, 0, 0))
            for v in p.vertices:
                fan_center += me0.vertices[v].co
            fan_center /= len(p.vertices)
            last_vert = len(fan_verts)
            fan_verts.append(fan_center.to_tuple())
            for i in range(len(p.vertices)):
                fan_polygons.append((p.vertices[i],
                                     p.vertices[(i + 1) % len(p.vertices)],
                                     last_vert, last_vert))
        print(fan_verts)
        print(fan_polygons)
        fan_me = bpy.data.meshes.new('Fan.Mesh')
        fan_me.from_pydata(tuple(fan_verts), [], tuple(fan_polygons))
        me0 = fan_me
        verts0 = me0.vertices

    for p in me0.polygons:
        # polygon vertices
        if randomize:
            shifted_vertices = []
            n_poly_verts = len(p.vertices)
            rand = random.randint(0, n_poly_verts)
            for i in range(n_poly_verts):
                shifted_vertices.append(p.vertices[(i + rand) % n_poly_verts])
            vs0 = np.array([verts0[i].co for i in shifted_vertices])
            nvs0 = np.array([verts0[i].normal for i in shifted_vertices])
        else:
            vs0 = np.array([verts0[i].co for i in p.vertices])
            nvs0 = np.array([verts0[i].normal for i in p.vertices])
        vs0 = np.array((vs0[0], vs0[1], vs0[2], vs0[-1]))

        # polygon normals
        nvs0 = np.array((nvs0[0], nvs0[1], nvs0[2], nvs0[-1]))

        v0 = vs0[0] + (vs0[1] - vs0[0]) * vx
        v1 = vs0[3] + (vs0[2] - vs0[3]) * vx
        v2 = v0 + (v1 - v0) * vy
        nv0 = nvs0[0] + (nvs0[1] - nvs0[0]) * vx
        nv1 = nvs0[3] + (nvs0[2] - nvs0[3]) * vx
        nv2 = nv0 + (nv1 - nv0) * vy
        v3 = v2 + nv2 * vz * (sqrt(p.area) if scale_mode == "ADAPTIVE" else 1)

        if j == 0:
            new_verts_np = v3
        else:
            new_verts_np = np.concatenate((new_verts_np, v3), axis=0)
        for p in fs1:
            new_faces.append([i + n_verts * j for i in p])
        j += 1

    new_verts = new_verts_np.tolist()
    new_name = ob0.name + "_" + ob1.name
    new_me = bpy.data.meshes.new(new_name)
    new_me.from_pydata(new_verts, [], new_faces)
    #new_me.from_pydata(new_verts, new_edges, [])
    new_me.update()
    return new_me
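# Hedged usage sketch (assumption: run inside Blender 2.7x, where
# Object.to_mesh(scene, apply_modifiers, settings) and "matrix_world * v.co" are
# valid; the object names 'Base' and 'Component' are placeholders).
import bpy

base = bpy.data.objects['Base']            # mesh whose faces receive the component
component = bpy.data.objects['Component']  # mesh instanced on each face

me = tassellate(base, component, offset=0.0, zscale=1.0,
                gen_modifiers=False, com_modifiers=False,
                mode="ADAPTIVE", scale_mode="ADAPTIVE",
                randomize=False, rand_seed=1, fill_mode='QUAD')
ob = bpy.data.objects.new(me.name, me)
bpy.context.scene.objects.link(ob)         # 2.7x API; 2.8+ links via collections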
# The coordinates of three points on a plane are given... (Lab work No. 3)
from math import sqrt, acos, pi

# Read the input data
Ax = int(input('Enter the x coordinate of point A: '))
Ay = int(input('Enter the y coordinate of point A: '))
Bx = int(input('Enter the x coordinate of point B: '))
By = int(input('Enter the y coordinate of point B: '))
Cx = int(input('Enter the x coordinate of point C: '))
Cy = int(input('Enter the y coordinate of point C: '))
print()

# Compute the lengths of the triangle sides
AB = sqrt((Ax - Bx) ** 2 + (Ay - By) ** 2)
BC = sqrt((Bx - Cx) ** 2 + (By - Cy) ** 2)
AC = sqrt((Ax - Cx) ** 2 + (Ay - Cy) ** 2)

# Check that the triangle exists
cont = 0
if (AB + BC > AC) and (AB + AC > BC) and (BC + AC > AB):
    # the sum of any two sides exceeds the third
    cont = 1
else:
    print("Such a triangle cannot exist")

# Print the side lengths
if cont == 1:
    print("Length AB =", '{:.5g}'.format(AB))
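# The snippet above ends after printing AB, yet it imports acos and pi that are never
# used; a plausible continuation (an assumption, not from the original) would print the
# remaining sides and the angles via the law of cosines:
if cont == 1:
    print("Length BC =", '{:.5g}'.format(BC))
    print("Length AC =", '{:.5g}'.format(AC))
    # angle at each vertex, converted from radians to degrees
    angle_A = acos((AB**2 + AC**2 - BC**2) / (2 * AB * AC)) * 180 / pi
    angle_B = acos((AB**2 + BC**2 - AC**2) / (2 * AB * BC)) * 180 / pi
    angle_C = acos((AC**2 + BC**2 - AB**2) / (2 * AC * BC)) * 180 / pi
    print("Angle A =", '{:.5g}'.format(angle_A))
    print("Angle B =", '{:.5g}'.format(angle_B))
    print("Angle C =", '{:.5g}'.format(angle_C))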
def distance(a, b):
    '''Return the distance between two points.'''
    return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)
def euclidean_distance(row1, row2):
    distance = 0.0
    # note: the last element of each row is excluded from the sum
    for i in range(len(row1) - 1):
        distance += (row1[i] - row2[i]) ** 2
    return sqrt(distance)
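# Hedged usage sketch: with rows laid out as [feature1, feature2, ..., label],
# euclidean_distance compares only the feature part (the trailing element is skipped).
from math import sqrt

row_a = [2.0, 3.0, 0]  # two features plus a trailing label
row_b = [5.0, 7.0, 1]
print(euclidean_distance(row_a, row_b))  # sqrt(3**2 + 4**2) = 5.0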