def makesumrule(ptype, plen, ts, lagtype='centered'):
    """ This function will return the sum rule.
        Inputs
            ptype - The type of pulse.
            plen - Length of the pulse in seconds.
            ts - Sample time in seconds.
            lagtype - Can be centered, forward or backward.
        Output
            sumrule - A 2 x nlags numpy array that holds the summation rule.
    """
    nlags = int(sp.floor(plen / ts))
    if ptype.lower() == 'long':
        if lagtype == 'forward':
            arback = -sp.arange(nlags, dtype=int)
            arforward = sp.zeros(nlags, dtype=int)
        elif lagtype == 'backward':
            arback = sp.zeros(nlags, dtype=int)
            arforward = sp.arange(nlags, dtype=int)
        else:
            arback = -sp.ceil(sp.arange(0, nlags / 2.0, 0.5)).astype(int)
            arforward = sp.floor(sp.arange(0, nlags / 2.0, 0.5)).astype(int)
        sumrule = sp.array([arback, arforward])
    elif ptype.lower() == 'barker':
        sumrule = sp.array([[0], [0]])
    else:
        raise ValueError('The pulse type %s is not a valid pulse type.' % (ptype))
    return sumrule
def prob4(filename='saw.wav', new_rate=11025, outfile='prob4.wav'):
    """Down-samples a given .wav file to a new rate and saves the resulting
    signal as another .wav file.

    Parameters
    ----------
    filename : string, optional
        The name of the .wav sound file to be down-sampled.
        Defaults to 'saw.wav'.
    new_rate : integer, optional
        The down-sampled rate. Defaults to 11025.
    outfile : string, optional
        The name of the new file. Defaults to 'prob4.wav'.

    Returns
    -------
    None
    """
    old_rate, in_sig = wavfile.read(filename)
    fin = fftw.fft(sp.float32(in_sig))
    # Use if scipy_fftpack is unavailable
    # fin = sp.fft(sp.float32(in_sig))
    nsiz = int(sp.floor(in_sig.size * new_rate / old_rate))
    nsizh = int(sp.floor(nsiz / 2))
    fout = sp.zeros(nsiz) + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz - nsizh + 1:] = sp.conj(sp.flipud(fout[1:nsizh]))
    out = sp.real(sp.ifft(fout))
    out = sp.int16(out / sp.absolute(out).max() * 32767)
    plot_signal(filename)
    wavfile.write(outfile, new_rate, out)   # write to outfile rather than a hard-coded name
    print("")
    plot_signal(outfile)
def misfit_xy(p, data, aModZ, X, Y):
    # Note: dx, dy, kx and ky are assumed to be defined at module level.
    x0, y0 = p
    x1 = x0 / dx - scipy.floor(X.mean() / dx)
    y1 = y0 / dy - scipy.floor(Y.mean() / dy)
    m = (aModZ / data) - scipy.exp(2j * numpy.pi * (kx * x1 + ky * y1))
    return (numpy.sqrt((m * m.conj()).real) * abs(aModZ)).ravel()
def people_wall_distance(self, xyr, I=None, J=None):
    """This function determines distances to the nearest wall for all people

    Parameters
    ----------
    xyr: numpy array
        people coordinates and radius: ``x,y,r``
    I: numpy array (None by default)
        people index ``i``
    J: numpy array (None by default)
        people index ``j``

    Returns
    -------
    I: numpy array
        people index ``i``
    J: numpy array
        people index ``j``
    D: numpy array
        distances to the nearest wall
    """
    if ((I is None) or (J is None)):
        I = sp.floor((xyr[:, 1] - self.ymin - 0.5 * self.pixel_size) / self.pixel_size).astype(int)
        J = sp.floor((xyr[:, 0] - self.xmin - 0.5 * self.pixel_size) / self.pixel_size).astype(int)
    D = self.wall_distance[I, J] - xyr[:, 2]
    return I, J, D
def people_target_distance(self, xyr, people_dest, I=None, J=None):
    """This function determines distances to the current target for all people

    Parameters
    ----------
    xyr: numpy array
        people coordinates and radius: ``x,y,r``
    people_dest: list of string
        destination for each individual
    I: numpy array (None by default)
        people index ``i``
    J: numpy array (None by default)
        people index ``j``

    Returns
    -------
    I: numpy array
        people index i
    J: numpy array
        people index j
    D: numpy array
        distances to the current target
    """
    if ((I is None) or (J is None)):
        I = sp.floor((xyr[:, 1] - self.ymin - 0.5 * self.pixel_size) / self.pixel_size).astype(int)
        J = sp.floor((xyr[:, 0] - self.xmin - 0.5 * self.pixel_size) / self.pixel_size).astype(int)
    D = np.zeros(xyr.shape[0])
    for id, dest_name in enumerate(np.unique(people_dest)):
        ind = np.where(np.array(people_dest) == dest_name)[0]
        D[ind] = self.destinations[dest_name].distance[I[ind], J[ind]] - xyr[ind, 2]
    return I, J, D
def down_sample(filename, new_rate, outputfile=None):
    """
    Create a down-sampled copy of the provided .wav file. Unless overridden,
    the output file will be of the form "down_<originalname>.wav"

    Parameters
    ----------
    filename : string
        input .wav file
    new_rate : int
        sample rate of output file
    outputfile : string
        name of output file
    """
    if outputfile is None:
        outputfile = "down_" + filename

    old_rate, in_sig = wavfile.read(filename)
    in_sig = sp.float32(in_sig)
    fin = sp.fft(in_sig)
    nsiz = int(sp.floor(in_sig.size * new_rate / old_rate))
    nsizh = int(sp.floor(nsiz / 2))
    fout = sp.zeros(nsiz)
    fout = fout + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz - nsizh + 1:] = sp.conj(sp.flipud(fout[1:nsizh]))
    out = sp.ifft(fout)
    out = sp.real(out)  # Take the real component of the signal
    out = sp.int16(out / sp.absolute(out).max() * 32767)
    wavfile.write(outputfile, new_rate, out)
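# Hedged usage sketch (added, not part of the original source): halving the
# sample rate of a hypothetical 44.1 kHz recording. 'voice.wav' is a
# placeholder file name.
def demo_down_sample():
    down_sample('voice.wav', 22050)   # writes 'down_voice.wav'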
def ellipse2bbox(a, b, angle, cx, cy):
    a, b = max(a, b), min(a, b)
    ca = sp.cos(angle)
    sa = sp.sin(angle)
    if sa == 0.0:
        cta = 2.0 / sp.pi
    else:
        cta = ca / sa
    if ca == 0.0:
        ta = sp.pi / 2.0
    else:
        ta = sa / ca

    x = lambda t: cx + a * sp.cos(t) * ca - b * sp.sin(t) * sa
    y = lambda t: cy + b * sp.sin(t) * ca + a * sp.cos(t) * sa

    # x = cx + a * cos(t) * cos(angle) - b * sin(t) * sin(angle)
    # tan(t) = -b * tan(angle) / a
    tx1 = sp.arctan(-b * ta / a)
    tx2 = tx1 - sp.pi
    x1, y1 = x(tx1), y(tx1)
    x2, y2 = x(tx2), y(tx2)

    # y = cy + b * sin(t) * cos(angle) + a * cos(t) * sin(angle)
    # tan(t) = b * cot(angle) / a
    ty1 = sp.arctan(b * cta / a)
    ty2 = ty1 - sp.pi
    x3, y3 = x(ty1), y(ty1)
    x4, y4 = x(ty2), y(ty2)

    minx, maxx = Util.minmax([x1, x2, x3, x4])
    miny, maxy = Util.minmax([y1, y2, y3, y4])
    return sp.floor(minx), sp.floor(miny), sp.ceil(maxx), sp.ceil(maxy)
def sqrtContFrac(n):
    """
    Computes the continued fraction of the square root of n.

    For non-square n, returns it in the form [a0, a1, a2, (a3, a4, a5)]
    where the tuple (a3, a4, a5) is the repeated part.

    Not valid for square n, so don't use it for those. - Kelvin =)
    """
    rootn = sp.sqrt(n)
    a = []
    a.append(int(sp.floor(rootn)))
    bLast = Fraction(0)
    cLast = Fraction(1)
    abcList = []
    while not (a[-1], bLast, cLast) in abcList:
        abcList.append((a[-1], bLast, cLast))
        bNext = cLast * a[-1] - bLast
        cNext = (n - bNext ** 2) / cLast
        aNext = int(sp.floor((rootn + bNext) / cNext))
        a.append(aNext)
        bLast = bNext
        cLast = cNext
    repeatIndex = abcList.index((a[-1], bLast, cLast))
    a.pop()
    contFraction = a[:repeatIndex]
    repeat = tuple(a[repeatIndex:])
    contFraction.append(repeat)
    return contFraction
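# Worked examples (added): the continued fraction of sqrt(2) is [1; 2, 2, 2, ...]
# and that of sqrt(7) is [2; 1, 1, 1, 4, 1, 1, 1, 4, ...], so:
assert sqrtContFrac(2) == [1, (2,)]
assert sqrtContFrac(7) == [2, (1, 1, 1, 4)]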
def GetHHMMSS(self):
    hh = floor(self.ut)
    dummy = self.ut - hh
    mm = floor(dummy * 60)
    dummy = dummy * 60 - mm
    self.second = int(floor(dummy * 60))
    self.hour, self.minute = int(hh), int(mm)
def get_4_squares(parent1, parent2):
    n_folds = 2
    levels1 = np.unique(parent1)
    levels2 = np.unique(parent2)
    N1 = len(levels1)
    N2 = len(levels2)
    r1 = sp.random.permutation(N1)
    r2 = sp.random.permutation(N2)
    Icv1 = sp.floor(((sp.ones((N1)) * n_folds) * r1) / N1)
    Icv2 = sp.floor(((sp.ones((N2)) * n_folds) * r2) / N2)
    train_parents1 = levels1[Icv1 != 0]
    train_parents2 = levels2[Icv2 != 0]
    test_parents1 = levels1[Icv1 == 0]
    test_parents2 = levels2[Icv2 == 0]
    train_ind1 = np.array([e in train_parents1 for e in parent1], dtype=bool)
    train_ind2 = np.array([e in train_parents2 for e in parent2], dtype=bool)
    test_ind1 = np.array([e in test_parents1 for e in parent1], dtype=bool)
    test_ind2 = np.array([e in test_parents2 for e in parent2], dtype=bool)
    Itest = test_ind1 & test_ind2
    Itrain_distant = train_ind1 & train_ind2
    Itrain_close1 = (train_ind1 & test_ind2)
    Itrain_close2 = (train_ind2 & test_ind1)
    Itrain_close = select_subset(Itrain_close1 | Itrain_close2, Itest.sum())
    return Itest, Itrain_distant, Itrain_close1, Itrain_close2, Itrain_close
def update(i):
    global t, arrow_magn, shrink_factor, full_size
    for k in range(capture_interval):
        observedWind.update(dt)
        wind_field.update(dt)
        plume_model.update(dt)
        t += dt
    velocity_field = wind_field.velocity_field
    u, v = velocity_field[:, :, 0], velocity_field[:, :, 1]
    u, v = u[0:full_size - 1:shrink_factor, 0:full_size - 1:shrink_factor], \
        v[0:full_size - 1:shrink_factor, 0:full_size - 1:shrink_factor]
    vector_field.set_UVC(u, v)
    x_wind, y_wind = observedWind.current_value()
    wind_arrow.set_positions(
        (xmin + (xmax - xmin) / 2, ymax - 0.2 * (ymax - ymin)),
        (xmin + (xmax - xmin) / 2 + arrow_magn * x_wind,
         ymax - 0.2 * (ymax - ymin) + arrow_magn * y_wind))
    text = '{0} min {1} sec'.format(
        int(scipy.floor(abs(t / 60.))), int(scipy.floor(abs(t) % 60.)))
    timer.set_text(text)
    conc_array = array_gen.generate_single_array(plume_model.puff_array)
    conc_im.set_data(conc_array.T[::-1])
    concStorer.store(conc_array.T[::-1])
    last = time.time()
    windStorer.store(velocity_field)
    plumeStorer.store(plume_model.puff_array)
    return [conc_im]
def bilinear(img, px, py):
    # find the positions of the four neighbors
    x0 = int(floor(px))
    y0 = int(floor(py))
    x1 = int(ceil(px))
    y1 = int(ceil(py))

    # return black if the position is out of the image's range
    if y1 not in range(0, img.shape[0]) or x1 not in range(0, img.shape[1]):
        return 0

    # read the BGR values of the four neighboring pixels
    # (a pixel is already a 3-element array, so it can be unpacked directly;
    # cv.split expects a full image, not a single pixel)
    b_x0y0, g_x0y0, r_x0y0 = img[y0, x0]
    b_x0y1, g_x0y1, r_x0y1 = img[y1, x0]
    b_x1y0, g_x1y0, r_x1y0 = img[y0, x1]
    b_x1y1, g_x1y1, r_x1y1 = img[y1, x1]

    # bilinear interpolation weights
    wx0y0 = (x1 - px) * (y1 - py)
    wx0y1 = (x1 - px) * (py - y0)
    wx1y0 = (px - x0) * (y1 - py)
    wx1y1 = (px - x0) * (py - y0)

    b_result = b_x0y0 * wx0y0 + b_x0y1 * wx0y1 + b_x1y0 * wx1y0 + b_x1y1 * wx1y1
    g_result = g_x0y0 * wx0y0 + g_x0y1 * wx0y1 + g_x1y0 * wx1y0 + g_x1y1 * wx1y1
    r_result = r_x0y0 * wx0y0 + r_x0y1 * wx0y1 + r_x1y0 * wx1y0 + r_x1y1 * wx1y1

    # assemble the interpolated BGR value (cv.merge expects arrays, so build
    # the pixel directly; assumes numpy is available as np)
    return np.array([b_result, g_result, r_result])
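# Hedged usage sketch (added, not part of the original source): sampling an
# image at a fractional position. 'photo.png' is a placeholder file name and
# cv is assumed to be the cv2 module imported by the surrounding code.
def demo_bilinear():
    img = cv.imread('photo.png')           # BGR image
    pixel = bilinear(img, 10.25, 20.75)    # interpolated BGR value at x=10.25, y=20.75
    print(pixel)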
def calcProfilV(self, xy):
    """Return the velocity values along a cross-section."""
    vxvy = self.getMfVitesse()
    grd = self.parent.aquifere.getFullGrid()
    x0, y0, dx, dy, nx, ny = grd['x0'], grd['y0'], grd['dx'], grd['dy'], grd['nx'], grd['ny']
    x, y = zip(*xy)
    xl0, xl1 = x[:2]
    yl0, yl1 = y[:2]
    dd = min(dx, dy) * .95
    dxp, dyp = xl1 - xl0, yl1 - yl0
    ld = max(ceil(abs(dxp / dx)), ceil(abs(dyp / dy)))
    ld = int(ld + 1)
    ddx = dxp / ld
    ddy = dyp / ld
    xp2 = xl0 + arange(ld + 1) * ddx
    yp2 = yl0 + arange(ld + 1) * ddy
    ix = floor((xp2 - x0) / dx)
    ix = clip(ix.astype(int), 0, nx - 1)
    iy = floor((yp2 - y0) / dy)
    iy = clip(iy.astype(int), 0, ny - 1)
    vx = take(ravel(vxvy[0]), iy * nx + ix)
    vy = take(ravel(vxvy[1]), iy * nx + ix)
    V = sqrt(vx ** 2 + vy ** 2)
    cu = sqrt((xp2 - xp2[0]) ** 2 + (yp2 - yp2[0]) ** 2)
    return [cu, V]
def seq_threeparcounterfit(y, t, f0, diff=False):
    """ period-wise (single-)sinefit to the linearly increasing heterodyne counter,
    version based on Blume et al.

    y    vector of sampled counter values
    t    vector of sample times
    f0   given frequency

    returns (n,3)-matrix of coefficient-triplets [a,b,c] per period

    if diff=True, use differentiation to remove the carrier (c.f. source)
    """
    Tau = 1.0 / f0
    dt = t[1] - t[0]
    N = int(sp.floor(Tau / dt))      # samples per section
    M = int(sp.floor(t.size / N))    # number of sections or periods

    remove_counter_carrier(y, diff=diff)

    abc = sp.zeros((M, 4))
    for i in range(int(M)):
        ti = t[i * N:(i + 1) * N]
        yi = y[i * N:(i + 1) * N]
        abc[i, :] = threeparsinefit_lin(yi, ti, f0)
    return abc  # matrix of all fit vectors per period
def Spectral_Gradient(nx, ny, lx, ly):
    # Create wavenumber vector for x-direction
    tmp1 = sc.linspace(0, nx / 2, int(nx / 2 + 1)) * 2 * sc.pi / lx
    tmp2 = sc.linspace(1 - nx / 2, -1, int(nx / 2 - 1)) * 2 * sc.pi / lx
    kx = sc.concatenate((tmp1, tmp2))

    # Create wavenumber vector for y-direction
    tmp1 = sc.linspace(0, ny / 2, int(ny / 2 + 1)) * 2 * sc.pi / ly
    tmp2 = sc.linspace(1 - ny / 2, -1, int(ny / 2 - 1)) * 2 * sc.pi / ly
    ky = sc.concatenate((tmp1, tmp2))

    # Dealiasing with the 2/3 rule
    trunc_x_low = int(sc.floor(2 / 3 * nx / 2)) + 1
    trunc_x_high = int(sc.ceil(4 / 3 * nx / 2))
    kx[trunc_x_low:trunc_x_high] = sc.zeros(trunc_x_high - trunc_x_low)
    trunc_y_low = int(sc.floor(2 / 3 * ny / 2)) + 1
    trunc_y_high = int(sc.ceil(4 / 3 * ny / 2))
    ky[trunc_y_low:trunc_y_high] = sc.zeros(trunc_y_high - trunc_y_low)

    # Create gradient operators in the Fourier domain for the x- and y-direction
    # (note: meshgrid(ky, kx) returns arrays of shape (nx, ny))
    Kx, Ky = sc.meshgrid(ky, kx)
    Kx = 1j * Kx
    Ky = 1j * Ky

    return Kx, Ky
def compute_desired_velocity(dom, people):
    """
    This function determines people desired velocities from the desired \
    velocity array computed by Domain thanks to a fast-marching method.

    Parameters
    ----------
    dom: Domain
        contains everything for managing the domain
    people: numpy array
        people coordinates and radius : x,y,r

    Returns
    -------
    I : numpy array
        people index i
    J : numpy array
        people index j
    Vd : numpy array
        people desired velocity
    """
    I = sp.floor((people[:, 1] - dom.ymin - 0.5 * dom.pixel_size) / dom.pixel_size).astype(int)
    J = sp.floor((people[:, 0] - dom.xmin - 0.5 * dom.pixel_size) / dom.pixel_size).astype(int)
    Vd = sp.zeros((people.shape[0], 2))
    Vd[:, 0] = dom.desired_velocity_X[I, J]
    Vd[:, 1] = dom.desired_velocity_Y[I, J]
    return I, J, Vd
def people_desired_velocity(self, xyr, people_dest, I=None, J=None):
    """This function determines people desired velocities from the desired \
    velocity array computed by Domain thanks to a fast-marching method.

    Parameters
    ----------
    xyr: numpy array
        people coordinates and radius: x,y,r
    people_dest: list of string
        destination for each individual
    I: numpy array (None by default)
        people index i
    J: numpy array (None by default)
        people index j

    Returns
    -------
    I: numpy array
        people index i
    J: numpy array
        people index j
    Vd: numpy array
        people desired velocity
    """
    if ((I is None) or (J is None)):
        I = sp.floor((xyr[:, 1] - self.ymin - 0.5 * self.pixel_size) / self.pixel_size).astype(int)
        J = sp.floor((xyr[:, 0] - self.xmin - 0.5 * self.pixel_size) / self.pixel_size).astype(int)
    Vd = sp.zeros((xyr.shape[0], 2))
    for id, dest_name in enumerate(np.unique(people_dest)):
        ind = np.where(np.array(people_dest) == dest_name)[0]
        scale = self.destinations[dest_name].velocity_scale
        Vd[ind, 0] = xyr[ind, 3] * scale * self.destinations[dest_name].desired_velocity_X[I[ind], J[ind]]
        Vd[ind, 1] = xyr[ind, 3] * scale * self.destinations[dest_name].desired_velocity_Y[I[ind], J[ind]]
    return I, J, Vd
def eqns_wrapper(x, t, I, dt):
    # Note: eqns is assumed to be defined at module level.
    deriv = []
    Iinj = I[0][int(sp.floor(t / dt))]
    I_time = I[1][int(sp.floor(t / dt))]
    args = list(x) + [Iinj, I_time]   # avoid shadowing the builtin 'input'
    for eq in eqns:
        deriv.append(eq(*args))
    return deriv
def exit_out_of_domain(dom, people, arrays=[], box=None):
    """ Removes individuals who are outside the domain or outside a given box

    Parameters
    ----------
    dom: Domain
        contains everything for managing the domain
    people: numpy array
        people coordinates and radius : x,y,r
    arrays: list of numpy array
        other arrays to resize similarly as people and U
    box: numpy array
        box coordinates [xmin,xmax,ymin,ymax] which replace the \
        domain minimum and maximum coordinates

    Returns
    -------
    people: numpy array
        new people array (outside individuals have been removed)
    arrays: list of numpy array
        new arrays resized similarly as the people array
    """
    if box is None:
        ## Remove people who are outside the domain
        S = (people[:, 0] - people[:, 2] <= dom.xmin + dom.pixel_size) + \
            (people[:, 0] - people[:, 2] >= dom.xmax - dom.pixel_size) + \
            (people[:, 1] - people[:, 2] <= dom.ymin + dom.pixel_size) + \
            (people[:, 1] - people[:, 2] >= dom.ymax - dom.pixel_size)
    else:
        ## Remove people who are outside the given box
        S = (people[:, 0] - people[:, 2] <= box[0] + dom.pixel_size) + \
            (people[:, 0] - people[:, 2] >= box[1] - dom.pixel_size) + \
            (people[:, 1] - people[:, 2] <= box[2] + dom.pixel_size) + \
            (people[:, 1] - people[:, 2] >= box[3] - dom.pixel_size)
    ind = sp.where(S == False)[0]
    people = people[ind, :]
    if (len(arrays) > 0):
        # rebuild the list: reassigning the loop variable would not resize the arrays
        arrays = [a[ind] for a in arrays]

    ## Remove people who are too close to walls or with a masked door distance
    I = sp.floor((people[:, 1] - dom.ymin - 0.5 * dom.pixel_size) / dom.pixel_size).astype(int)
    J = sp.floor((people[:, 0] - dom.xmin - 0.5 * dom.pixel_size) / dom.pixel_size).astype(int)
    Dwall = dom.wall_distance[I, J] - people[:, 2]
    Ddoor = dom.door_distance[I, J]
    indDwall = sp.where(Dwall <= dom.pixel_size)[0]
    indDdoor = sp.where(Ddoor.mask == True)[0]
    ind = sp.unique(sp.concatenate((indDwall, indDdoor)))
    comp_ind = sp.setdiff1d(sp.arange(people.shape[0]), ind)
    if (len(arrays) > 0):
        return people[comp_ind, :], [a[comp_ind] for a in arrays]
    else:
        return people[comp_ind, :]
def f_save_data(self, outputfile):
    '''
    Save treated data to ASCII
    '''
    if outputfile != '':
        self.outputdir = outputfile
        if self.inputdir == '':
            self.inputdir = self.outputdir
        thinout = int(self.lineEdit_13.text())
        # Select which part of the field sweep to save
        if self.checkBox_7.isChecked() and self.checkBox_8.isChecked():
            # Save both the field ramp-up and ramp-down
            sl = slice(0, None, thinout)
        elif self.checkBox_7.isChecked():
            # Save the field ramp-up only
            sl = slice(0, self.f_max, thinout)
        elif self.checkBox_8.isChecked():
            # Save the field ramp-down only
            sl = slice(self.f_max, None, thinout)
        else:
            # Otherwise save everything
            sl = slice(0, None, thinout)
        nrows = len(self.data[sl, 0])
        out_data = zeros((nrows, 3 + 2 * len(self.coldata)))
        out_data[:, 0] = self.data[sl, 0]
        out_data[:, 1] = self.data[sl, 2 + len(self.colref) + len(self.coldata)] / self.pu_area
        out_data[:, 2] = self.data[sl, 1]
        for j in range(0, len(self.coldata)):
            out_data[:, 2 * j + 3] = self.sig_out[sl, 2 * j] / self.intgain * 1e3
            out_data[:, 2 * j + 4] = self.sig_out[sl, 2 * j + 1] / self.intgain * 1e3
        f_handle = open(str(outputfile), 'w')
        f_handle.write('#time\tB\tdBdt\tin_phase\tout_phase\n')
        savetxt(f_handle, out_data[0:len(out_data[:, 0]) - 2, :], fmt='%10g', delimiter='\t')
        f_handle.close()
        self.label_23.setText('Data saved')
def wigner3j(j1, j2, j3, m1, m2, m3):
    #======================================================================
    # Wigner3j.m by David Terr, Raytheon, 6-17-04
    #
    # Compute the Wigner 3j symbol using the Racah formula [1].
    #
    # Usage:
    # from wigner import Wigner3j
    # wigner = Wigner3j(j1,j2,j3,m1,m2,m3)
    #
    # / j1 j2 j3 \
    # |          |
    # \ m1 m2 m3 /
    #
    # Reference: Wigner 3j-Symbol entry of Eric Weinstein's Mathworld:
    # http://mathworld.wolfram.com/Wigner3j-Symbol.html
    #======================================================================

    # Error checking
    if ((2 * j1 != floor(2 * j1)) | (2 * j2 != floor(2 * j2)) |
            (2 * j3 != floor(2 * j3)) | (2 * m1 != floor(2 * m1)) |
            (2 * m2 != floor(2 * m2)) | (2 * m3 != floor(2 * m3))):
        raise ValueError('All arguments must be integers or half-integers.')

    # Selection rules: the m's must sum to zero, the (j,m) pairs must be
    # consistent, and the triangle inequality must hold
    if (m1 + m2 + m3 != 0 or
            j1 - m1 != floor(j1 - m1) or
            j2 - m2 != floor(j2 - m2) or
            j3 - m3 != floor(j3 - m3) or
            j3 > j1 + j2 or j3 < abs(j1 - j2) or
            abs(m1) > j1 or abs(m2) > j2 or abs(m3) > j3):
        return 0

    t1 = j2 - m1 - j3
    t2 = j1 + m2 - j3
    t3 = j1 + j2 - j3
    t4 = j1 - m1
    t5 = j2 + m2

    tmin = max(0, max(t1, t2))
    tmax = min(t3, min(t4, t5))
    tvec = arange(tmin, tmax + 1, 1)

    wigner = 0
    for t in tvec:
        wigner += (-1) ** t / (factorial(t) * factorial(t - t1) *
                               factorial(t - t2) * factorial(t3 - t) *
                               factorial(t4 - t) * factorial(t5 - t))

    return wigner * (-1) ** (j1 - j2 - m3) * sqrt(
        factorial(j1 + j2 - j3) * factorial(j1 - j2 + j3) *
        factorial(-j1 + j2 + j3) / factorial(j1 + j2 + j3 + 1) *
        factorial(j1 + m1) * factorial(j1 - m1) *
        factorial(j2 + m2) * factorial(j2 - m2) *
        factorial(j3 + m3) * factorial(j3 - m3))
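# Worked examples (added): a selection-rule case and a closed-form check.
# The symbol vanishes when m1 + m2 + m3 != 0, and (1 1 0; 0 0 0) = -1/sqrt(3).
print(wigner3j(1, 1, 1, 1, 0, 0))   # 0
print(wigner3j(1, 1, 0, 0, 0, 0))   # -0.57735... = -1/sqrt(3)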
def test_optdiv1(beta=0.9, pHigh=0.75, grid=scipy.arange(21.0), useValueIter=True):
    time1 = time.time()
    localvars = {}

    def postVIterCallbackFn(nIter, currentVArray, newVArray, optControls, stoppingResult):
        global g_iterList
        (stoppingDecision, diff) = stoppingResult
        print("iter %d, diff %f" % (nIter, diff))
        localvars[0] = nIter

    def postPIterCallbackFn(nIter, newVArray, currentPolicyArrayList, greedyPolicyList, stoppingResult):
        (stoppingDecision, diff) = stoppingResult
        print("iter %d, diff %f" % (nIter, diff))
        localvars[0] = nIter

    initialVArray = grid              # initial guess for V: a linear fn
    initialPolicyArray = grid         # initial guess for d: pay out everything
    utilityFn = lambda x: x           # linear utility
    zStates = [-1.0, 1.0]
    zProbs = [1.0 - pHigh, pHigh]     # income shock
    params = OptDivParams1(utilityFn, beta, zStates, zProbs, grid)
    # don't use parallel search with this, since it makes a callback to Python
    if (useValueIter == True):
        result = bellman.grid_valueIteration([grid], initialVArray, params,
                                             postIterCallbackFn=postVIterCallbackFn, parallel=False)
        (nIter, currentVArray, newVArray, optControls) = result
    else:
        result = bellman.grid_policyIteration([grid], [initialPolicyArray], initialVArray, params,
                                              postIterCallbackFn=postPIterCallbackFn, parallel=False)
        (nIter, currentVArray, currentPolicyArrayList, greedyPolicyList) = result
        newVArray = currentVArray
        optControls = currentPolicyArrayList
    time2 = time.time()
    nIters = localvars[0]
    print("total time: %f, avg time: %f" % (time2 - time1, (time2 - time1) / nIters))
    print("x_0 == 0: %d" % alwaysPayAll(beta, pHigh))
    n0 = getn0(beta, pHigh)
    optd_fn = linterp.LinInterp1D(grid, optControls[0])
    print("n0: %f, d(floor(n0)): %f" % (n0, optd_fn(scipy.floor(n0))))

    # plot V
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(grid, newVArray)
    ax.set_xlabel("M")
    ax.set_ylabel("V")

    # plot optimal d
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(grid, optControls[0])
    ax.axvline(scipy.floor(n0), color='gray')
    ax.set_xlabel("M")
    ax.set_ylabel("optimal d")

    plt.show()
    return result
def get_cb_ticks(values):
    min_tick = sp.nanmin(values)
    max_tick = sp.nanmax(values)
    med_tick = min_tick + (max_tick - min_tick) / 2.0
    if max_tick > 1.0:
        min_tick = sp.ceil(min_tick)
        max_tick = sp.floor(max_tick)
        med_tick = sp.around(med_tick)
    else:
        min_tick = sp.ceil(min_tick * 100.0) / 100.0
        max_tick = sp.floor(max_tick * 100.0) / 100.0
        med_tick = sp.around(med_tick, 2)
    return [min_tick, med_tick, max_tick]
def westheimer_simple(sz, h, w, cval, sval, sep):
    """Build a Westheimer-style stimulus: a central bar of value cval flanked
    on either side by bars of value sval at separation sep, on an sz x sz
    field of NaNs."""
    stim = sp.zeros((sz, sz))
    stim[:] = sp.nan
    a = sz // 2 - int(sp.floor(h / 2.0))
    b = sz // 2 + int(sp.ceil(h / 2.0))
    c = sz // 2 - int(sp.floor(w / 2.0))
    d = sz // 2 + int(sp.ceil(w / 2.0))
    stim[a:b, c:d] = cval                        # central bar
    stim[a:b, c - w - sep:d - w - sep] = sval    # bar left of centre
    stim[a:b, c + w + sep:d + w + sep] = sval    # bar right of centre
    return stim
def add_people_in_box(Np, dom, xmin, xmax, ymin, ymax, rmin, rmax, rng):
    """ Adds new persons in the box [xmin,xmax]x[ymin,ymax]
    Be careful : overlaps can occur...

    Parameters
    ----------
    Np: int
        Number of persons
    dom: Domain
        contains everything for managing the domain
    xmin: float
        minimal abscissa of the box
    xmax : float
        maximal abscissa of the box
    ymin: float
        minimal ordinate of the box
    ymax: float
        maximal ordinate of the box
    rmin: float
        minimum radius for the individuals
    rmax: float
        maximum radius for the individuals
    rng: scipy.random.RandomState
        container for the Mersenne Twister pseudo-random number generator

    Returns
    -------
    people : numpy array
        people coordinates and radius
    """
    px = dom.pixel_size
    people = sp.zeros((Np, 3))  # x y r
    people[:, 0] = rng.uniform(xmin, xmax, Np)
    people[:, 1] = rng.uniform(ymin, ymax, Np)
    people[:, 2] = rng.uniform(rmin, rmax, Np)
    I = sp.floor((people[:, 1] - dom.ymin - 0.5 * px) / px).astype(int)
    J = sp.floor((people[:, 0] - dom.xmin - 0.5 * px) / px).astype(int)
    D = dom.wall_distance[I, J] - people[:, 2]
    ind = sp.where(D <= px)
    while (ind[0].shape[0] > 0):
        people[ind[0], 0] = rng.uniform(xmin, xmax, ind[0].shape[0])
        people[ind[0], 1] = rng.uniform(ymin, ymax, ind[0].shape[0])
        I = sp.floor((people[:, 1] - dom.ymin - 0.5 * px) / px).astype(int)
        J = sp.floor((people[:, 0] - dom.xmin - 0.5 * px) / px).astype(int)
        D = dom.wall_distance[I, J] - people[:, 2]
        ind = sp.where(D <= px)
    return people
def seq_fourparsinefit(y, t, f0, tol=1.0e-7, nmax=1000, debug_plot=False, periods=1):
    """ period-wise sine-fit at a known frequency

    y           vector of sample values
    t           vector of sample times
    f0          estimate of excitation frequency
    nmax        maximum number of iterations to improve f0
    debug_plot  flag for plotting the sequential fit for debugging

    returns an (n,4)-matrix of coefficient quadruples [[a,b,c,f], ...]
    for y = a*sin(2*pi*f*t) + b*cos(2*pi*f*t) + c
    """
    Tau = 1.0 / f0
    dt = t[1] - t[0]
    N = int(sp.floor(Tau / dt)) * periods   # samples per section
    M = int(sp.floor(t.size / N))           # number of sections or periods

    abcd = sp.zeros((M, 4))
    for i in range(M):
        ti = t[i * N:(i + 1) * N]
        yi = y[i * N:(i + 1) * N]
        abcd[i, :] = fourparsinefit(yi, ti, f0, tol=tol, nmax=nmax)

    if debug_plot:
        mp.ioff()
        fig = mp.figure("seq_fourparsinefit")
        fig.clear()
        p1 = fig.add_subplot(211)
        p2 = fig.add_subplot(212, sharex=p1)

        for i in range(M):
            p1.plot(t[i * N:(i + 1) * N], y[i * N:(i + 1) * N], ".")
            s = calc_fourparsine(abcd[i, :], t[i * N:(i + 1) * N])  # fitted data to plot
            p1.plot(t[i * N:(i + 1) * N], s, "-")
            r = y[i * N:(i + 1) * N] - s                            # residuals to plot
            p2.plot(t[i * N:(i + 1) * N], r, ".")
        mp.show()
    return abcd  # matrix of all fit vectors per period
def __init__(self, center, sigma, f, population=None,
             center_learning_rate=1.0, sigma_learning_rate=None):
    """ Separable NES, as described in Schaul, Glasmachers and Schmidhuber (GECCO'11).
    Maximizes a function f.
    Returns (best solution found, corresponding fitness) """
    self.dim = len(center)
    self.center = center.copy()
    if sigma is None:
        sigma = 1.0
    if isinstance(sigma, float):
        self.sigmas = ones(self.dim) * sigma
    else:
        self.sigmas = sigma
    if not population:
        self.population = 4 + int(floor(3 * log(self.dim)))
    else:
        self.population = population
    self.population -= self.population % 2  # make even for the symmetry trick
    self.learning_rate = sigma_learning_rate
    if self.learning_rate is None:
        self.learning_rate = 100 * 0.6 * (3 + log(self.dim)) / self.dim / sqrt(self.dim)
    self.center_learning_rate = center_learning_rate
    self.numEvals = 0
    self.bestFound = None
    self.bestFitness = -Inf
def __getTractionMeshNodes(self, nodeset, x, face):
    """ Gets nodes of traction mesh element at given global x """

    # Map face onto ix
    ix = int(np.floor(face / 2))

    # Implementation for two dimensions
    if self.rank == 2:

        # Loop over indices of trFace
        for inod in range(len(self.tnodeIndices[ix]) - 1):

            # Get coords of nodes in trFace[inod: inod+2]
            connect = self.tnodeIndices[ix][inod:inod + 2]
            coords = nodeset.getCoords(connect)

            # Get correct index
            index = (ix + 1) % 2

            # Check if c0[index] < x[index] < c1[index]
            if coords[0, index] < x[index] < coords[1, index]:
                return connect

        raise RuntimeError(" No connect found. ")

    elif self.rank == 3:
        raise NotImplementedError(" Not yet implemented. ")
def ngp(parameters, positions, values):
    # Nearest-grid-point assignment of values onto an Ng^3 mesh.
    # Note: periodic_boundaries is assumed to be defined at module level.
    values_ngp = sp.zeros((parameters.Ng, parameters.Ng, parameters.Ng))
    counts_ngp = sp.zeros((parameters.Ng, parameters.Ng, parameters.Ng))
    cellsize = parameters.boxsize / parameters.Ng
    for position, pvalue in zip(positions, values):
        position = sp.array(position)
        position_cellunits = position / cellsize

        # cell indices (must be ints to index the arrays)
        cell_indices = sp.floor(position_cellunits).astype(int)
        if periodic_boundaries:
            cell_indices = sp.mod(cell_indices, parameters.Ng)
        index_x, index_y, index_z = cell_indices[0], cell_indices[1], cell_indices[2]

        values_ngp[index_x][index_y][index_z] += pvalue
        counts_ngp[index_x][index_y][index_z] += 1

    # Empty cells trigger a divide-by-zero warning and produce nan values
    values_ngp = sp.array(values_ngp) / sp.array(counts_ngp)
    print("Don't mind this warning. Astropy can handle nan-values")
    return values_ngp
def spect2acf(omeg, spec, n=None):
    """ Creates the ACF and time array associated with the given frequency vector
    and spectrum.
    Inputs:
        omeg: The frequency sampling vector.
        spec: The spectrum array.
        n: optional, default len(spec). Length of the output spectrum.
    Output:
        tau: The time sampling array.
        acf: The acf from the original spectrum."""
    if n is None:
        n = spec.shape[-1]
    n = int(n)
    # padnum = sp.floor(len(spec)/2)
    df = omeg[1] - omeg[0]

    # specpadd = sp.pad(spec,(padnum,padnum),mode='constant',constant_values=(0.0,0.0))
    acf = scfft.fftshift(scfft.ifft(scfft.ifftshift(spec, axes=-1), n, axis=-1), axes=-1)
    acf = acf / n
    dt = 1 / (df * n)
    tau = sp.arange(-sp.ceil(float(n - 1) / 2.), sp.floor(float(n - 1) / 2.) + 1) * dt
    return tau, acf
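# Hedged usage sketch (added, not part of the original source): the ACF of a
# flat (white) spectrum is concentrated at zero lag, i.e. the centre of tau.
def demo_spect2acf():
    omeg = sp.linspace(-25e3, 25e3, 128)
    spec = sp.ones(128)
    tau, acf = spect2acf(omeg, spec)
    print(sp.absolute(acf).argmax() == tau.size // 2)   # True: peak at zero lag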
def makepulse(ptype, plen, ts):
    """ This will make the pulse array.
        Inputs
            ptype - The type of pulse used.
            plen - The length of the pulse in seconds.
            ts - The sampling rate of the pulse.
        Output
            pulse - The pulse array that will be used as the window in the data formation.
            plen - The length of the pulse with the sampling time taken into account.
    """
    nsamps = int(sp.floor(plen / ts))

    if ptype.lower() == 'long':
        pulse = sp.ones(nsamps)
        plen = nsamps * ts

    elif ptype.lower() == 'barker':
        blen = sp.array([1, 2, 3, 4, 5, 7, 11, 13])
        nsampsarg = sp.argmin(sp.absolute(blen - nsamps))
        nsamps = blen[nsampsarg]
        pulse = GenBarker(nsamps)
        plen = nsamps * ts
    #elif ptype.lower()=='ac':
    else:
        raise ValueError('The pulse type %s is not a valid pulse type.' % (ptype))
    return (pulse, plen)
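# Hedged usage sketch (added, not part of the original source): a long pulse is
# a boxcar of ones whose length is rounded down to a whole number of samples.
def demo_makepulse():
    pulse, plen = makepulse('long', 280e-6, 20e-6)
    print(pulse)   # array of ones, ~14 samples
    print(plen)    # pulse length snapped to the sample grid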
def calc_modal_vector(atoms1, atoms2):
    """
    Calculate the 'modal vector', i.e. the difference vector between the two
    configurations. The minimum image convention is applied!
    """
    from scipy.linalg import inv
    from scipy import array, dot
    from scipy import sign, floor
    cell1 = atoms1.get_cell()
    cell2 = atoms2.get_cell()

    # The cells need to be the same (otherwise the whole process won't make sense)
    if (cell1 != cell2).any():
        raise ValueError("Encountered different cells in atoms1 and atoms2. Those need to be the same.")
    cell = cell1
    icell = inv(cell)
    frac1 = atoms1.get_scaled_positions()
    frac2 = atoms2.get_scaled_positions()
    modal_vector_frac = frac1 - frac2
    for i in range(modal_vector_frac.shape[0]):
        for j in range(modal_vector_frac.shape[1]):
            if abs(modal_vector_frac[i, j]) > .5:
                value = modal_vector_frac[i, j]
                vsign = sign(modal_vector_frac[i, j])
                absvalue = abs(value)
                modal_vector_frac[i, j] = value - vsign * floor(absvalue + .5)
    return dot(modal_vector_frac, cell)
def rebin_diff_noise(dll, ll, diff):
    crebin = 3
    if (diff.size < crebin):
        print("Warning: diff.size too small for rebin")
        return diff
    dll2 = crebin * dll

    # rebin not mixing pixels separated by masks
    bin2 = sp.floor((ll - ll.min()) / dll2 + 0.5).astype(int)

    # rebin regardless of intervening masks
    # nmax = diff.size//crebin
    # bin2 = np.zeros(diff.size)
    # for n in range (1,nmax +1):
    #     bin2[n*crebin:] += np.ones(diff.size-n*crebin)

    cdiff2 = np.bincount(bin2.astype(int), weights=diff)
    civ2 = np.bincount(bin2.astype(int))
    w = (civ2 > 0)
    if (len(civ2) == 0):
        print("Error: diff size = 0 ", diff)
    diff2 = cdiff2[w] / civ2[w] * np.sqrt(civ2[w])
    diffout = np.zeros(diff.size)
    nmax = len(diff) // len(diff2)
    for n in range(nmax + 1):
        lengthmax = min(len(diff), (n + 1) * len(diff2))
        diffout[n * len(diff2):lengthmax] = diff2[:lengthmax - n * len(diff2)]
        sp.random.shuffle(diff2)
    return diffout
def diff(start, end):
    # Assumes an orthorhombic cell (diagonal cell matrix)
    unitCell = end.get_cell()
    unitCellVectors = array([unitCell[0, 0], unitCell[1, 1], unitCell[2, 2]])
    diffS = end.get_scaled_positions() - start.get_scaled_positions()
    diffS = diffS - floor(diffS + 0.5)   # minimum image convention
    diffC = diffS * unitCellVectors
    return (diffC, diffS)
def fitsurface(errfunc, paramlists, inputs):
    """This function will create a fit surface using an error function given
    by the user and an N-length list of parameter value lists. The output will
    be an N-dimensional array where each dimension is the size of the array
    given for each of the parameters. Arrays of one element are not
    represented in the returned fit surface array.
    Inputs:
        errfunc - The function used to determine the error between the given
            data and the theoretical function.
        paramlists - An N-length list of arrays for each of the parameters.
        inputs - A tuple of the rest of the inputs for the error function."""
    paramsizlist = sp.array([len(i) for i in paramlists])
    outsize = sp.where(paramsizlist != 1)[0]
    # make the fit surface and flatten it
    fit_surface = sp.zeros(paramsizlist[outsize])
    fit_surface = fit_surface.flatten()

    for inum in range(sp.prod(paramsizlist)):
        numcopy = inum
        curnum = sp.zeros_like(paramsizlist)
        # TODO: Replace with sp.unravel_index
        # determine current parameters
        for i, iparam in enumerate(reversed(paramsizlist)):
            curnum[i] = sp.mod(numcopy, iparam)
            numcopy = sp.floor(numcopy / iparam)
        curnum = curnum[::-1]
        cur_x = sp.array([ip[curnum[num_p]] for num_p, ip in enumerate(paramlists)])
        diffthing = errfunc(cur_x, *inputs)
        fit_surface[inum] = (sp.absolute(diffthing) ** 2).sum()

    # return the fit surface after it has been de-flattened
    return fit_surface.reshape(paramsizlist[outsize]).copy()
def pref_y(t):
    if t < 1:
        return 0
    if t - sp.floor(t) < 0.5:
        return -0.05
    else:
        return 0.05
def plot_down_saw_spec_correct():
    plt.close('all')
    rate, in_sig = wavfile.read('saw.wav')
    old_rate = 44100
    new_rate = 22050
    in_sig = sp.float32(in_sig)
    fin = anfft.fft(in_sig)
    nsiz = int(sp.floor(in_sig.size * new_rate / old_rate))
    nsizh = int(sp.floor(nsiz / 2))
    fout = sp.zeros(nsiz)
    fout = fout + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz - nsizh + 1:] = sp.conj(sp.flipud(fout[1:nsizh]))
    f = sp.absolute(fout)
    plt.plot(f[0:f.shape[0] // 2])
    plt.savefig('sawdownspec.pdf')
def getLogBins(first_point, last_point, log_step):
    """ get the bins in log scale and the center bin values

    Parameters:
    ----------------
    first_point, last_point : number
        First and last point of the x-axis
    log_step : number
        Required log-distance between x-points

    Returns:
    -----------
    xbins : array of the x values at the center (in log-scale) of the bin
    bins : array of the x values of the bins
    """
    log_first_point = scipy.log10(first_point)
    log_last_point = scipy.log10(last_point)
    # Calculate the bins as required by the histogram function,
    # i.e. the bin edges including the rightmost one
    N_log_steps = int(scipy.floor((log_last_point - log_first_point) / log_step) + 1.0)
    llp = N_log_steps * log_step + log_first_point
    bins_in_log_scale = np.linspace(log_first_point, llp, N_log_steps + 1)
    bins = 10 ** bins_in_log_scale
    center_of_bins_log_scale = bins_in_log_scale[:-1] + log_step / 2.0
    xbins = 10 ** center_of_bins_log_scale
    return xbins, bins
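# Hedged usage sketch (added, not part of the original source): five bins per
# decade between 1 and 100 gives 11 edges and 10 logarithmically centred x values.
def demo_getLogBins():
    xbins, bins = getLogBins(1.0, 100.0, 0.2)
    print(bins)    # 11 edges from 1 to 100
    print(xbins)   # 10 centres, equally spaced in log10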
def draw_from_Q_true(N, bbox):
    # Draw xs and ys from a normal distribution
    vis = sp.random.randn(2 * N, 2)

    # Create bimodal distribution
    ncut = int(sp.floor(2 * N / 3))
    xis = vis[:, 0]
    yis = vis[:, 1]
    yis[:ncut] -= 2.0
    yis[ncut:] += 2.0
    xis[:ncut] -= 2.0
    xis[ncut:] *= 2.0
    xis[ncut:] += 1.0

    # Shuffle xis and yis
    indices = sp.arange(len(vis))
    sp.random.shuffle(indices)
    xis = xis[indices]
    yis = yis[indices]

    # Select exactly N data points
    indices = (xis > bbox[0]) & (xis < bbox[1]) & (yis > bbox[2]) & (yis < bbox[3])
    xis = xis[indices]
    xis = xis[:N]
    yis = yis[indices]
    yis = yis[:N]

    return xis, yis
def normalizeLength(self, noteOns, factor):
    #shibu = 60. / self.wavetempo * (self.binarized_data[0].size / self.duration)
    shibu = (self.fs / 10.) / (self.wavetempo / 60.)
    fixToResolution = noteOns / shibu * 480.
    fixToResolution[:, 2] = noteOns[:, 2]
    # MIDI_Res (resolution) = 480
    MIDI_Res = 480.
    minnotel = 1. / 4. * MIDI_Res
    # rate (allowed error)
    rate = 0.5
    # Sort in descending order of note number
    fixToResolution = self.rowsort(fixToResolution)
    self.oldFixToResolution = sp.copy(fixToResolution)
    # List of LilyPond notation symbols
    book = [[] for i in range(fixToResolution.shape[0])]

    for n in range(fixToResolution.shape[0]):
        x_cor = fixToResolution[n, 0] + minnotel * rate - 1
        #x_cor = fixToResolution[n, 0] + minnotel - 1
        x_cor = (sp.floor(x_cor / minnotel)) * minnotel
        if (x_cor == 0):
            x_cor = 1
        fixToResolution[n, 0] = x_cor
        fixToResolution[n, 3], book[n] = self.normalizeNoteLength(fixToResolution[n, 3] + factor)
        book[n] = self.convertNoteNo(fixToResolution[n, 2]) + book[n]
        fixToResolution[n, 1] = fixToResolution[n, 3] + fixToResolution[n, 0] - 1
    self.book = book
    return fixToResolution
def plot_pairwise_velocities_r(case, color, all_radial_distances, all_radial_velocities):
    dr = 0.3  # Mpc/h
    rmin, rmax = sp.amin(all_radial_distances), sp.amax(all_radial_distances)
    rrange = rmax - rmin
    N = int(sp.ceil(rrange / dr))
    rs = sp.linspace(rmin, rmax, N)
    v12_of_r = [[] for index in range(N)]

    for r, v12 in zip(all_radial_distances, all_radial_velocities):
        index = min(int(sp.floor((r - rmin) / dr)), N - 1)  # clamp so r == rmax stays in the last bin
        v12_of_r[index].append(v12)

    sigma_12s = sp.zeros(N)
    v12_means = sp.zeros(N)
    for index in range(len(sigma_12s)):
        v12_of_r_index = sp.array(v12_of_r[index])
        print("number of counts in the", index, "th bin:", len(v12_of_r_index))
        sigma_12 = sp.sqrt(sp.mean(v12_of_r_index ** 2))
        v12_mean = -sp.mean(v12_of_r_index)
        sigma_12s[index] = sigma_12
        v12_means[index] = v12_mean

    plt.plot(rs, sigma_12s, color=color, label='$\sigma_{12}$')
    plt.plot(rs, v12_means, color=color, label='$|v_{12}|$')
    plt.xlabel('r [Mpc/h]')
    plt.ylabel('[km/s]')
    plt.xscale('log')
    plt.axis([0.5, 100, 0, 600])
def _apply_chords(self, image, spacing, trim_edges):
    r'''
    This method returns a copy of the image with chords applied, solely for
    visualization purposes. The actual determination of the chord length
    distribution does not need this step.

    Notes
    -----
    This private method is called by the various public methods which rotate
    the image correctly prior to sending, then rotate it back upon receipt.
    '''
    # Extract size metrics from input image
    [Lx, Ly, Lz] = sp.shape(image)
    start = sp.array(sp.floor(spacing / 2), dtype=int)
    Y = sp.arange(start, Ly, spacing)
    Z = sp.arange(start, Lz, spacing)
    temp = sp.zeros([Lx, Ly, Lz], dtype=int)

    # Generate 2D mask of chords in X-dir
    maskX = sp.zeros([Lx, Ly, 1], dtype=bool)
    maskX[:, Y, :] = 1

    # Apply chord mask to specified layers (Z-dir) of input image
    temp[:, :, Z] = image[:, :, Z] * maskX

    if trim_edges:
        temp[[0, -1], :, :] = 1
        temp[:, [0, -1], :] = 1
        L = spim.label(temp)[0]
        ind = sp.where(L == L[0, 0, 0])
        temp[ind] = 0
    return sp.array(temp, dtype=bool)
def rescale(self):
    if isQuantity(self.unit):
        oldUnit = self.unit.inBaseUnits()
    else:
        return
    # Compute decade of field and multiply it to oldUnit
    oldFieldAmplitude = max(abs(numpy.amax(self.data)), abs(numpy.amin(self.data)))
    oldUnit *= oldFieldAmplitude
    # Compute next lower decade
    decade = scipy.log10(oldUnit.value)
    newDecade = 10 ** (scipy.floor(decade))
    # Find appropriate prefix
    baseUnit = oldUnit.unit.name()
    if baseUnit == 'm':
        prefixes = PREFIXES_METER
    else:
        prefixes = PREFIXES
    # use list comprehensions instead of map/filter, which return lazy
    # iterators in Python 3 and cannot be indexed
    prefixCandidates = [(p[0], abs(p[1] - newDecade)) for p in prefixes]
    optPrefix = min([prefix[1] for prefix in prefixCandidates])
    newPrefix = [prefix for prefix in prefixCandidates if prefix[1] == optPrefix][0][0]
    newUnitName = newPrefix + baseUnit
    # Convert to new unit
    newUnit = oldUnit.inUnitsOf(newUnitName)
    unitAmplitude = newUnit.value
    if self.data.dtype.name.startswith('int'):
        self.unit = newUnit / oldFieldAmplitude
        return
    self.data *= unitAmplitude / oldFieldAmplitude
    self.unit = newUnit / unitAmplitude
def split_jobs(Y, Njobs):
    # split phenotype matrix into jobs
    # think about splitting snps also
    splits = []
    [N, Np] = Y.shape
    # maximal splitting range is one job per phenotype
    Njobs = min(Njobs, Np)
    # figure out phenotypes per job (rounded down)
    npj = int(SP.floor(SP.double(Np) / Njobs))
    i0 = 0
    i1 = npj
    for n in range(Njobs):
        if n == (Njobs - 1):
            # make sure the last job spans all the rest
            i1 = Np
        Y_ = Y[:, i0:i1]
        splits.append([i0, i1, Y_])
        # next split
        i0 = i1
        i1 = i1 + npj
    return splits
def _FftSize(Q):
    '''
    Determines FFT size based on samples/symbol (Q) that guarantees the FFT
    is evaluated at f = 0.5
    '''
    return int(sp.floor(4096.0 / Q) * Q)
def getAxis(self, X, Y):
    """
    return the proper axis limits for the plots
    """
    out = []
    mM = [(min(X), max(X)), (min(Y), max(Y))]
    for i, j in mM:
        # YJC: checking if values are negative, if yes, return 0 and break
        if j < 0 or i < 0:
            return 0
        log_i = scipy.log10(i)
        d, I = scipy.modf(log_i)
        if log_i < 0:
            add = 0.5 * (scipy.absolute(d) < 0.5)
        else:
            add = 0.5 * (scipy.absolute(d) > 0.5)
        m = scipy.floor(log_i) + add
        out.append(10 ** m)

        log_j = scipy.log10(j)
        d, I = scipy.modf(log_j)
        if log_j < 0:
            add = -0.5 * (scipy.absolute(d) > 0.5)
        else:
            add = -0.5 * (scipy.absolute(d) < 0.5)
        m = scipy.ceil(log_j) + add
        out.append(10 ** m)
    return tuple(out)
def xNES(f, x0, maxEvals=1e6, verbose=False, targetFitness=-1e-10):
    """ Exponential NES (xNES), as described in
    Glasmachers, Schaul, Sun, Wierstra and Schmidhuber (GECCO'10).
    Maximizes a function f.
    Returns (best solution found, corresponding fitness).
    """
    dim = len(x0)
    I = eye(dim)
    learningRate = 0.6 * (3 + log(dim)) / dim / sqrt(dim)
    batchSize = 4 + int(floor(3 * log(dim)))
    center = x0.copy()
    A = eye(dim)  # sqrt of the covariance matrix
    numEvals = 0
    bestFound = None
    bestFitness = -Inf
    while numEvals + batchSize <= maxEvals and bestFitness < targetFitness:
        # produce and evaluate samples
        samples = [randn(dim) for _ in range(batchSize)]
        fitnesses = [f(dot(A, s) + center) for s in samples]
        if max(fitnesses) > bestFitness:
            bestFitness = max(fitnesses)
            bestFound = samples[argmax(fitnesses)]
        numEvals += batchSize
        if verbose:
            print("Step", numEvals / batchSize, ":", max(fitnesses), "best:", bestFitness)
        #print A

        # update center and variances
        utilities = computeUtilities(fitnesses)
        center += dot(A, dot(utilities, samples))
        covGradient = sum([u * (outer(s, s) - I) for (s, u) in zip(samples, utilities)])
        A = dot(A, expm(0.5 * learningRate * covGradient))  # expm2 was removed from modern scipy
    return bestFound, bestFitness
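# Hedged usage sketch (added, not part of the original source): maximizing a
# simple concave quadratic with xNES; assumes the module-level numpy/scipy
# names (ones, dot, ...) and the computeUtilities helper used above.
def demo_xNES():
    f = lambda x: -float(dot(x, x))     # maximum 0 at the origin
    best, fitness = xNES(f, ones(5), maxEvals=50000)
    print(fitness)                      # close to 0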
def cmd_ylim(mu):
    if scipy.ceil(mu) - mu < mu - scipy.floor(mu):
        cmax = scipy.ceil(mu) + 1
    else:
        cmax = scipy.ceil(mu)
    cmin = cmax - 3
    return cmin, cmax
def traj_ensemble_quantiles(traj_set, quantiles=(0.025, 0.5, 0.975)):
    """
    Return a list of trajectories, each one corresponding to a given
    passed-in quantile.
    """
    all_values = scipy.array([traj.values for traj in traj_set])
    sorted_values = scipy.sort(all_values, 0)

    q_trajs = []
    for q in quantiles:
        # Calculate the index corresponding to this quantile. The q is because
        # Python arrays are 0 indexed
        index = q * (len(sorted_values) - 1)
        below = int(scipy.floor(index))
        above = int(scipy.ceil(index))
        if above == below:
            q_values = sorted_values[below]
        else:
            # Linearly interpolate...
            q_below = (1.0 * below) / (len(sorted_values) - 1)
            q_above = (1.0 * above) / (len(sorted_values) - 1)
            q_values = sorted_values[below] + (q - q_below) * \
                (sorted_values[above] - sorted_values[below]) / (q_above - q_below)
        q_traj = copy.deepcopy(traj_set[0])
        q_traj.values = q_values
        q_trajs.append(q_traj)

    return q_trajs
def SNES(f, x0, maxEvals=1e6, verbose=False, targetFitness=-1e-10):
    """ Separable NES, as described in Schaul, Glasmachers and Schmidhuber (GECCO'11).
    Maximizes a function f.
    Returns (best solution found, corresponding fitness) """
    dim = len(x0)
    learningRate = 0.2 * (3 + log(dim)) / sqrt(dim)
    batchSize = 4 + int(floor(3 * log(dim)))
    center = x0.copy()
    sigmas = ones(dim)
    numEvals = 0
    bestFound = None
    bestFitness = -Inf
    while numEvals + batchSize <= maxEvals and bestFitness < targetFitness:
        # produce and evaluate samples
        samples = [randn(dim) for _ in range(batchSize)]
        fitnesses = [f(sigmas * s + center) for s in samples]
        if max(fitnesses) > bestFitness:
            bestFitness = max(fitnesses)
            bestFound = samples[argmax(fitnesses)]
        numEvals += batchSize
        if verbose:
            print("Step", numEvals / batchSize, ":", max(fitnesses), "best:", bestFitness)

        # update center and variances
        utilities = computeUtilities(fitnesses)
        center += sigmas * dot(utilities, samples)
        covGradient = dot(utilities, [s ** 2 - 1 for s in samples])
        sigmas = sigmas * exp(0.5 * learningRate * covGradient)
    return bestFound, bestFitness
def kteo(data, k=1):
    """teager energy operator of range k [TEO]

    The discrete teager energy operator (TEO) of window size k is defined as:
    M{S{Psi}[x(n)] = x^2(n) - x(n-k) x(n+k)}

    :type data: ndarray
    :param data: The signal to operate on. ndim=1
    :type k: int
    :param k: Parameter defining the window size for the TEO.
    :return: ndarray - Array of same shape as the input signal, holding the
        kteo response.
    :except: If inconsistent dims or shapes.
    """
    # checks and inits
    if data.ndim != 1:
        raise ValueError(
            'ndim != 1! ndim=%s with shape=%s' % (data.ndim, data.shape))

    # apply nonlinear energy operator with range k
    # (list lengths for the zero padding must be ints)
    rval = data ** 2 - sp.concatenate(([0] * int(sp.ceil(k / 2.0)),
                                       data[:-k] * data[k:],
                                       [0] * int(sp.floor(k / 2.0))))

    # return
    return rval
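# Hedged demo (added, not part of the original source): the TEO responds much
# more strongly to a fast transient than to a slow carrier, which is what
# makes it useful for spike detection.
def demo_kteo():
    n = sp.arange(1000)
    x = sp.sin(2 * sp.pi * n / 500.0)                       # slow carrier
    x[480:520] += sp.sin(2 * sp.pi * n[480:520] / 10.0)     # fast burst
    r = kteo(x, k=2)
    print(abs(r[480:520]).max() > 10 * abs(r[:400]).max())  # True: burst dominates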
def __findSmallestElement(self, mesh):
    """ Finds the smallest element dimension on each bndFace """

    # smallest element dimensions
    self.dx_0 = deepcopy(self.dx)

    # Loop over faces of bndNodes
    for face, bndFace in enumerate(self.bndNodes):

        # Map face onto ix
        ix = int(np.floor(face / 2))

        # Get correct index
        index = (ix + 1) % 2

        # Loop over indices of bndFace
        for inod in range(len(bndFace) - 1):

            # Get nodal coordinates
            c0 = mesh.coords[bndFace[inod]]
            c1 = mesh.coords[bndFace[inod + 1]]

            # Calculate dx and compare to dx0
            dx = c1[index] - c0[index]
            if dx < self.dx_0[index]:
                self.dx_0[index] = dx
def __init__(self, centerFrequency=440.2 * 1e6, bMag=0.4e-4, nspec=64, sampfreq=50e3,
             collfreqmin=1e-2, alphamax=30.0, dFlag=False, f=None):
    """ Constructor for the class.
    Inputs :
        centerFrequency: The radar center frequency in Hz.
        bMag: The magnetic field magnitude in Teslas.
        nspec: The number of points of the spectrum.
        sampfreq: The sampling frequency of the A/Ds in Hz.
        collfreqmin: (Default 1e-2) The minimum collision frequency needed to
            incorporate it into Gordeyev integral calculations, in units of
            K*sqrt(Kb*Ts/ms) for each ion species.
        alphamax: (Default 30) The maximum magnetic aspect angle in which the
            B-field will be taken into account.
        dFlag: A debug flag; if set true will output debug text. Default is false.
        f: A numpy array of frequency points, in Hz, the spectrum will be
            formed over. Default is None, in which case the frequency vector
            will be formed using the number of points for the spectrum and the
            sampling frequency to create a linearly sampled frequency vector.
    """
    self.bMag = bMag
    self.dFlag = dFlag
    self.collfreqmin = collfreqmin
    self.alphamax = alphamax
    # The Bragg scattering vector, corresponds to half the radar wavelength.
    self.K = 2.0 * sp.pi * 2 * centerFrequency / v_C_0
    if f is None:
        minfreq = -sp.ceil((nspec - 1.0) / 2.0)
        maxfreq = sp.floor((nspec - 1.0) / 2.0 + 1)
        self.f = sp.arange(minfreq, maxfreq) * (sampfreq / (2 * sp.ceil((nspec - 1.0) / 2.0)))
    else:
        self.f = f
    self.omeg = 2.0 * sp.pi * self.f
def RWGNumber_cubeNumber_computation(a, max_N_cubes_1D, cube_lower_coord, RWGNumber_edgeCentroidCoord):
    """This function finds for each edge the cube to which it belongs.
    a is the length of the side of a cube."""
    RWGNumber_cube = floor((RWGNumber_edgeCentroidCoord - cube_lower_coord) / a).astype('i')
    RWGNumber_cubeNumber = RWGNumber_cube[:, 0] * max_N_cubes_1D ** 2 + \
        RWGNumber_cube[:, 1] * max_N_cubes_1D + RWGNumber_cube[:, 2]
    RWGNumber_cubeCentroidCoord = cube_lower_coord + a * RWGNumber_cube + ones(3, 'd') * a / 2.0
    return RWGNumber_cubeNumber.astype('i'), RWGNumber_cubeCentroidCoord.astype('d')
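# Hedged demo (added, not part of the original source): three edge centroids
# assigned to unit cubes in a 4 x 4 x 4 grid anchored at the origin; assumes
# the module-level scipy imports (floor, ones, array) used above.
def demo_cubeNumbers():
    centroids = array([[0.2, 0.1, 0.3], [1.5, 0.5, 0.5], [3.9, 3.9, 3.9]])
    nums, centres = RWGNumber_cubeNumber_computation(1.0, 4, array([0.0, 0.0, 0.0]), centroids)
    print(nums)      # [ 0 16 63]: number = i*16 + j*4 + k
    print(centres)   # cube centres, e.g. [0.5 0.5 0.5] for the first point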
def sart(self):
    self.wij_sum = sp.zeros((self.ny, self.ny))
    if self.pslice is None:
        slice_range = range(self.nx)
    else:
        slice_range = [self.pslice]

    for self.pslice in slice_range:
        self.reco = sp.zeros((self.ny, self.ny))
        sinogram = self.projections[:, self.pslice, :]
        self.update_figure(pslice=True)

        for it in range(self.iterations):
            self.upd = sp.zeros_like(self.reco)
            for i in range(self.n_proj):
                then = time.time()
                multip = multiprocess(self.ray_update_worker, num_processes=12)
                for chunk in split_seq(range(self.ny),
                                       int(sp.floor(self.ny / multip.num_processes))):
                    multip.add_job((self.angles[i], sinogram[i, :], self.reco.copy(), chunk, it == 0))
                self.do_closeout(multip)
                if i % 10 == 0:
                    print('Iter: {:d}, Proj: {:d}, Duration: {:3.2f} sec'.format(it, i, time.time() - then))
            if it == 0:
                self.reco += self.upd / (self.wij_sum + 0.1)
            else:
                self.reco += self.relax * self.upd / (self.wij_sum + 0.1)
            self.update_figure()