def radar_cross_section(frequency, width, incident_angle, observation_angle): """ Calculate the bistatic radar cross section for a 2D strip. :param frequency: The frequency of the incident energy (Hz). :param width: The width of the strip (m). :param incident_angle: The incident angle (deg). :param observation_angle: The observation angle (deg). :return: The bistatic radar cross section (m^2). """ # Wavelength wavelength = c / frequency # Wavenumber k = 2.0 * pi / wavelength phi_i = radians(incident_angle) phi_o = radians(observation_angle) rcs_tm = k * width**2 * sin(phi_i) * sinc(width / wavelength * (cos(phi_o) + cos(phi_i)))**2 rcs_te = k * width**2 * sin(phi_o) * sinc(width / wavelength * (cos(phi_o) + cos(phi_i)))**2 return rcs_tm, rcs_te
def radar_cross_section(frequency, width, length, incident_theta, observation_theta, observation_phi): """ Calculate the bistatic radar cross section for a rectangular plate. :param frequency: The frequency of the incident energy (Hz). :param width: The width of the plate (m). :param length: The length of the plate (m). :param incident_theta: The incident angle theta (deg). :param observation_theta: The observation angle theta (deg). :param observation_phi: The observation angle phi (deg). :return: The bistatic radar cross section (m^2). """ # Wavelength wavelength = c / frequency theta_i = radians(incident_theta) theta_o = radians(observation_theta) phi_o = radians(observation_phi) x = width / wavelength * sin(theta_o) * cos(phi_o) y = length / wavelength * (sin(theta_o) * sin(phi_o) - sin(theta_i)) rcs_tm = 4.0 * pi * (length * width / wavelength)**2 * ( cos(theta_i)**2 * (cos(theta_o)**2 * cos(phi_o)**2 + sin(phi_o)**2)) * sinc(x)**2 * sinc(y)**2 rcs_te = 4.0 * pi * (length * width / wavelength)**2 * ( cos(theta_o)**2 * sin(phi_o)**2 + cos(phi_o)**2) * sinc(x)**2 * sinc(y)**2 return rcs_tm, rcs_te
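# Usage sketch (added example, not from the original source): sweep the observation angle for the
# rectangular-plate RCS function above and convert to dBsm. It assumes radar_cross_section is
# importable from the module where it is defined (with its own c/pi/radians/sin/cos/sinc imports in
# scope); the frequency and plate dimensions are arbitrary choices.
import numpy as np

frequency = 10e9            # Hz
width, length = 0.3, 0.3    # plate dimensions (m)
incident_theta = 30.0       # deg
observation_phi = 0.0       # deg
obs_theta = np.linspace(0.1, 89.9, 500)
rcs_tm = np.array([radar_cross_section(frequency, width, length, incident_theta, t, observation_phi)[0]
                   for t in obs_theta])
rcs_tm_db = 10.0 * np.log10(rcs_tm + 1e-30)   # dBsm; small floor avoids log(0)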
def getXiAuto(self,rp,rt,z,pk_lin,pars): k = self.k if not self.fit_aiso: ap=pars["ap"] at=pars["at"] else: ap=pars["aiso"]*pars["1+epsilon"]*pars["1+epsilon"] at=pars["aiso"]/pars["1+epsilon"] ar=np.sqrt(rt**2*at**2+rp**2*ap**2) mur=rp*ap/ar muk = model.muk kp = k * muk kt = k * np.sqrt(1-muk**2) bias_lya = pars["bias_lya*(1+beta_lya)"]/(1.+pars["beta_lya"]) beta_lya = pars["beta_lya"] if self.uv_fluct: bias_gamma = pars["bias_gamma"] bias_prim = pars["bias_prim"] lambda_uv = pars["lambda_uv"] W = sp.arctan(k*lambda_uv)/(k*lambda_uv) bias_lya_prim = bias_lya + bias_gamma*W/(1+bias_prim*W) beta_lya = bias_lya*beta_lya/bias_lya_prim bias_lya = bias_lya_prim if self.lls: bias_lls = pars["bias_lls"] beta_lls = pars["beta_lls"] L0_lls = pars["L0_lls"] F_lls = sp.sinc(kp*L0_lls/sp.pi) beta_lya = (bias_lya*beta_lya + bias_lls*beta_lls*F_lls)/(bias_lya+bias_lls*F_lls) bias_lya = bias_lya + bias_lls*F_lls pk_full = pk_lin * (1+beta_lya*muk**2)**2*bias_lya**2 Lpar=pars["Lpar_auto"] Lper=pars["Lper_auto"] Gpar = sp.sinc(kp*Lpar/2/sp.pi) Gper = sp.sinc(kt*Lper/2/sp.pi) pk_full*=Gpar**2 pk_full*=Gper**2 sigmaNLper = pars["SigmaNL_perp"] sigmaNLpar = sigmaNLper*pars["1+f"] pk_nl = sp.exp(-(kp*sigmaNLpar)**2/2-(kt*sigmaNLper)**2/2) pk_full *= pk_nl pk_full *= self.DNL(k,muk,self.pk,self.q1_dnl,self.kv_dnl,self.av_dnl,self.bv_dnl,self.kp_dnl,self.dnl_model) evol = self.evolution_Lya_bias(z,[pars["alpha_lya"]])*self.evolution_growth_factor(z) evol /= self.evolution_Lya_bias(self.zref,[pars["alpha_lya"]])*self.evolution_growth_factor(self.zref) evol = evol**2. return self.Pk2Xi(ar,mur,k,pk_full,ell_max=self.ell_max)*evol
def getXiAuto2D(self, rp, rt, z, pk2d, pars): if not self.fit_aiso: ap = pars["ap"] at = pars["at"] else: ap = pars["aiso"] * pars["1+epsilon"] * pars["1+epsilon"] at = pars["aiso"] / pars["1+epsilon"] art = at * rt arp = ap * rp bias_lya = pars["bias_lya*(1+beta_lya)"] / (1. + pars["beta_lya"]) beta_lya = pars["beta_lya"] if self.uv_fluct: bias_gamma = pars["bias_gamma"] bias_prim = pars["bias_prim"] lambda_uv = pars["lambda_uv"] W = sp.arctan(self.k * lambda_uv) / (self.k * lambda_uv) bias_lya_prim = bias_lya + bias_gamma * W / (1 + bias_prim * W) beta_lya = bias_lya * beta_lya / bias_lya_prim bias_lya = bias_lya_prim if self.lls: bias_lls = pars["bias_lls"] beta_lls = pars["beta_lls"] L0_lls = pars["L0_lls"] F_lls = sp.sinc(self.kp * L0_lls / sp.pi) beta_lya = (bias_lya * beta_lya + bias_lls * beta_lls * F_lls) / ( bias_lya + bias_lls * F_lls) bias_lya = bias_lya + bias_lls * F_lls sigmaNLper = pars["SigmaNL_perp"] sigmaNLpar = sigmaNLper * pars["1+f"] pk_full = pk2d * sp.exp( -(sigmaNLper**2 * self.kt**2 + sigmaNLpar**2 * self.kp**2) / 2) pk_full = pk_full * (1 + beta_lya * self.muk**2)**2 * bias_lya**2 Lpar = pars["Lpar_auto"] Lper = pars["Lper_auto"] pk_full *= sp.sinc(self.kp * Lpar / 2 / sp.pi)**2 pk_full *= sp.sinc(self.kt * Lper / 2 / sp.pi)**2 pk_full *= self.DNL(self.k, self.muk, self.pk_2d, self.q1_dnl, self.kv_dnl, self.av_dnl, self.bv_dnl, self.kp_dnl, self.dnl_model) evol = self.evolution_Lya_bias( z, [pars["alpha_lya"]]) * self.evolution_growth_factor(z) evol /= self.evolution_Lya_bias( self.zref, [pars["alpha_lya"]]) * self.evolution_growth_factor( self.zref) evol = evol**2. return fftlog.Pk2XiA(self.k1d, pk_full, arp, art) * evol
def sincKer(width, scale): ''' Return a sinc filter kernel with specified width and scale width - size parameter of the filter; the kernel spans width + 1 points per axis (not a radius!) scale - zeros will be centered around 0 with spacing "scale" ''' x = r_[0:width + 1] - width * 0.5 k = numpy.outer(scipy.sinc(x / scale), scipy.sinc(x / scale)) return k
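# Usage sketch (added example, assumptions noted): smooth an image with the 2-D sinc kernel returned
# by sincKer above, using scipy.signal.convolve2d. The random image is a stand-in for real data; the
# kernel is renormalized to unit sum so the filter preserves the mean level.
import numpy as np
from scipy.signal import convolve2d

image = np.random.rand(128, 128)
ker = sincKer(width=8, scale=2)
ker = ker / ker.sum()                                   # unit DC gain
smoothed = convolve2d(image, ker, mode='same', boundary='symm')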
def getXiAutoQSO(self, rp, rt, z, pk_lin, pars): k = self.k if not self.fit_aiso: ap = pars["ap"] at = pars["at"] else: ap = pars["aiso"] * pars["1+epsilon"] * pars["1+epsilon"] at = pars["aiso"] / pars["1+epsilon"] ar = sp.sqrt(rt**2 * at**2 + rp**2 * ap**2) mur = rp * ap / ar muk = model.muk kp = k * muk kt = k * sp.sqrt(1 - muk**2) ### QSO-QSO auto correlation bias_qso = pars["bias_qso"] beta_qso = pars["growth_rate"] / bias_qso pk_full = pk_lin * (bias_qso * (1. + beta_qso * muk**2))**2 ### Velocity dispersion if (self.velo_gauss): pk_full *= sp.exp(-0.5 * (kp * pars['sigma_velo_gauss'])**2) if (self.velo_lorentz): pk_full /= 1. + (kp * pars['sigma_velo_lorentz'])**2 ### Peak broadening sigmaNLper = pars["SigmaNL_perp"] sigmaNLpar = sigmaNLper * pars["1+f"] pk_full *= sp.exp(-0.5 * ((sigmaNLper * kt)**2 + (sigmaNLpar * kp)**2)) ### Pixel size Lpar = pars["Lpar_autoQSO"] Lper = pars["Lper_autoQSO"] pk_full *= sp.sinc(kp * Lpar / 2. / sp.pi)**2 pk_full *= sp.sinc(kt * Lper / 2. / sp.pi)**2 ### Redshift evolution qso_evol = [pars['qso_evol_0'], pars['qso_evol_1']] evol = sp.power( self.evolution_growth_factor(z) / self.evolution_growth_factor(self.zref), 2.) evol *= sp.power( self.evolution_QSO_bias(z, qso_evol) / self.evolution_QSO_bias(self.zref, qso_evol), 2.) return self.Pk2Xi(ar, mur, k, pk_full, ell_max=self.ell_max) * evol
def sinc_interp1d(x, s, r): """Interpolates `x`, sampled at times `s` Output `y` is sampled at times `r` inspired by a Matlab example: http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html :param ndarray x: input data time series :param ndarray s: input sampling time series (regular sample interval) :param ndarray r: output sampling time series :return ndarray: output data time series (regular sample interval) """ # init s = sp.asarray(s) r = sp.asarray(r) x = sp.asarray(x) if x.ndim == 1: x = sp.atleast_2d(x) else: if x.shape[0] == len(s): x = x.T else: if x.shape[1] != s.shape[0]: raise ValueError('x and s must have the same temporal extent') if len(s) == len(r) and sp.allclose(s, r): return x.T T = s[1] - s[0] # resample sincM = sp.tile(r, (len(s), 1)) - sp.tile(s[:, sp.newaxis], (1, len(r))) return sp.vstack([sp.dot(xx, sp.sinc(sincM / T)) for xx in x]).T
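# Usage sketch (added example): upsample a uniformly sampled tone with sinc_interp1d. The input
# times s must be regularly spaced, as the docstring requires; the 3 Hz tone is an arbitrary test
# signal. For 1-D input the result has shape (len(r), 1).
import numpy as np

s = np.arange(0.0, 1.0, 0.05)          # input sample times (regular spacing)
x = np.sin(2 * np.pi * 3.0 * s)        # samples of a 3 Hz tone
r = np.arange(0.0, 0.95, 0.01)         # denser output sample times
y = sinc_interp1d(x, s, r)             # sinc-interpolated samples, shape (len(r), 1)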
def shg_perfect_type_I(self, I1, lambd1, L, theta, phi): """ Calculate the intensity of the second harmonic with perfect type I phase matching. Effectively, there is only one incident beam since both fundamental beams have the same intensity and wavelength. Equation 2.2.19 on page 78 of Nonlinear Optics by Robert Boyd, Third Edition. :param I1: Intensity of the fundamental. :param lambd1: The wavelength of the fundamental. :param L: Length of the crystal. :param theta: Polar angle. :param phi: Azimuthal angle. :return: Intensity of the type I second harmonic. """ deff = 7.36e-12 n1 = self.outer_normal(lambd1, theta, phi) n2 = self.outer_normal(lambd1, theta, phi) n3 = self.inner_normal(2 * lambd1, theta, phi) omega3 = pc.c * n1 / lambd1 + pc.c * n2 / lambd1 del_k = self.delta_k_I(lambd1, lambd1 / 2, theta, phi) return 8 * deff**2 * omega3**2 * I1**2 / (n1 * n2 * n3 * pc.epsilon0 * pc.c**2) * L**2 * sp.sinc( del_k * L / 2)**2
def radar_cross_section_3d(frequency, radius, incident_angle, observation_angle, number_of_modes, length): """ Calculate the bistatic radar cross section for a finite length cylinder with oblique incidence. :param frequency: The frequency of the incident energy (Hz). :param radius: The radius of the cylinder (m). :param incident_angle: The angle of incidence from z-axis (deg). :param observation_angle: The observation angle (deg). :param number_of_modes: The number of terms to take in the summation. :param length: The length of the cylinder (m). :return: The bistatic radar cross section for the finite length cylinder (m^2). """ # Wavelength wavelength = c / frequency theta_i = radians(incident_angle) theta_o = theta_i # Calculate the 2D RCS rcs_te, rcs_tm = radar_cross_section(frequency, radius, incident_angle, observation_angle, number_of_modes) value = 2.0 * length ** 2 / wavelength * sin(theta_o) ** 2 * \ sinc(length / wavelength * (cos(theta_i) + cos(theta_o))) ** 2 return rcs_te * value, rcs_tm * value
def sum_frequenc_I(self, I1, I2, lambd1, lambd3, L, theta, phi): """ Calculate the intensity of the summed frequency pulse with fundamental pulse intensities I1 and I2 for Type I phase matching. Equation 2.2.19 on page 78 of Nonlinear Optics by Robert Boyd, Third Edition. :param I1: Intensity of one of the fundamental beams. :param I2: Intensity of the other fundamental beam. :param lambd1: One of the fundamental wavelengths. :param lambd3: The target sum frequency. :param L: The length of the crystal. :param theta: Polar angle. :param phi: Azimuthal angle. :return: Intensity of the type I summed frequency beam. """ omega3 = pc.c / lambd3 lambd2 = lambd1 * lambd3 / (lambd1 - lambd3 ) # From w3 = w1 + w2, w=2pi*c/lambda deff = 7.36e-12 # this is really the type II deff n1 = self.outer_normal(lambd1, theta, phi) n2 = self.outer_normal(lambd2, theta, phi) n3 = self.inner_normal(lambd3, theta, phi) del_k = self.delta_k_I(lambd1, lambd3, theta, phi) return 8 * deff**2 * omega3**2 * I1 * I2 / ( n1 * n2 * n3 * pc.epsilon0 * pc.c**2) * L**2 * sp.sinc( del_k * L / 2)**2
def far_fields(width, height, frequency, r, theta, phi): """ Calculate the far zone electric and magnetic fields for a rectangular aperture in a ground plane. with a TE10 distribution of fields in the aperture. :param r: The range to the field point (m). :param theta: The theta angle to the field point (rad). :param phi: The phi angle to the field point (rad). :param width: The width of the aperture (m). :param height: The height of the aperture (m). :param frequency: The operating frequency (Hz). :return: The far zone electric and magnetic fields (V/m), (A/m). """ # Calculate the wavenumber k = 2.0 * pi * frequency / c # Calculate the wave impedance eta = sqrt(mu_0 / epsilon_0) # Define the x and y wavenumber components kx = k * width * 0.5 * sin(theta) * cos(phi) ky = k * height * 0.5 * sin(theta) * sin(phi) # Define the radial-component of the electric far field (V/m) e_r = 0.0 # Define the theta-component of the electric far field (V/m) e_theta = 1j * width * height * k / (2.0 * pi * r) * exp(-1j * k * r) * (-0.5 * pi * sin(phi)) * \ cos(kx) / (kx ** 2 - (0.5 * pi) ** 2) * sinc(ky) # Define the phi-component of the electric far field (V/m) e_phi = 1j * width * height * k / (2.0 * pi * r) * exp(-1j * k * r) * (-0.5 * pi * cos(theta) * cos(phi)) * \ cos(kx) / (kx ** 2 - (0.5 * pi) ** 2) * sinc(ky) # Define the radial-component of the magnetic far field (A/m) h_r = 0.0 # Define the theta-component of the magnetic far field (A/m) h_theta = -1j * width * height * k / (2.0 * pi * eta * r) * exp(-1j * k * r) * \ (-0.5 * pi * cos(theta) * cos(phi)) * cos(kx) / (kx ** 2 - (0.5 * pi) ** 2) * sinc(ky) # Define the phi-component of the magnetic far field (A/m) h_phi = 1j * width * height * k / (pi * eta * r) * exp(-1j * k * r) * (-0.5 * pi * sin(phi)) * \ cos(kx) / (kx ** 2 - (0.5 * pi) ** 2) * sinc(ky) # Return all six components of the far field return e_r, e_theta, e_phi, h_r, h_theta, h_phi
def raised2(t, beta, T): a = 1.0 / T b = sinc(t / T) c = cos((pi * beta * t) / T) d = 1 - pow(2.0 * beta * t / T, 2) #print "a:%s, b:%s, c:%s, d:%s" % (a,b,c,d) return a * b * (c / d)
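# Usage sketch (added example): evaluate the raised-cosine impulse response raised2 on a time grid,
# skipping the removable singularity at |t| = T/(2*beta) where the denominator d vanishes (the
# closed-form value at that point is what the raised() helper further below computes).
import numpy as np

T, beta = 1.0, 0.35
t = np.linspace(-4 * T, 4 * T, 801)
t = t[np.abs(np.abs(t) - T / (2 * beta)) > 1e-9]     # drop the two singular points
h = np.array([raised2(ti, beta, T) for ti in t])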
def far_fields(guide_height, horn_width, horn_effective_length, frequency, r, theta, phi): """ Calculate the electric and magnetic fields in the far field of the horn. :param r: The distance to the field point (m). :param theta: The theta angle to the field point (rad). :param phi: The phi angle to the field point (rad). :param guide_height: The height of the waveguide feed (m). :param horn_width: The width of the horn (m). :param horn_effective_length: The effective length of the horn (m). :param frequency: The operating frequency (Hz). :return: The electric and magnetic fields radiated by the horn (V/m), (A/m). """ # Calculate the wavenumber k = 2.0 * pi * frequency / c # Calculate the wave impedance eta = sqrt(mu_0 / epsilon_0) # Define the radial-component of the electric field e_r = 0.0 # Define the theta-component of the electric field e_theta = sin(phi) * (1.0 + cos(theta)) * sinc(k * guide_height * 0.5 * sin(theta) * sin(phi)) * \ I(k, r, theta, phi, horn_width, guide_height, horn_effective_length) # Define the phi-component of the electric field e_phi = cos(phi) * (1.0 + cos(theta)) * sinc(k * guide_height * 0.5 * sin(theta) * sin(phi)) *\ I(k, r, theta, phi, horn_width, guide_height, horn_effective_length) # Define the radial-component of the magnetic field h_r = 0.0 # Define the theta-component of the magnetic field h_theta = -cos(phi) / eta * (1.0 + cos(theta)) * sinc(k * guide_height * 0.5 * sin(theta) * sin(phi)) * \ I(k, r, theta, phi, horn_width, guide_height, horn_effective_length) # Define the phi-component of the magnetic field h_phi = sin(phi) / eta * (1.0 + cos(theta)) * sinc(k * guide_height * 0.5 * sin(theta) * sin(phi)) * \ I(k, r, theta, phi, horn_width, guide_height, horn_effective_length) # Return all six components of the far field return e_r, e_theta, e_phi, h_r, h_theta, h_phi
def single_pulse(time_delay, doppler_frequency, pulse_width): """ Calculate the ambiguity function for a continuous wave single pulse. :param time_delay: The time delay for the ambiguity function (seconds). :param doppler_frequency: The Doppler frequency for the ambiguity function (Hz). :param pulse_width: The pulse width (seconds). :return: The ambiguity function for a CW pulse. """ ambiguity = abs((1.0 - abs(time_delay / pulse_width)) * sinc(doppler_frequency * (pulse_width - abs(time_delay))))**2 ambiguity[abs(time_delay) > pulse_width] = 0 return ambiguity
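# Usage sketch (added example): evaluate the CW-pulse ambiguity function on a delay grid at a fixed
# Doppler shift. single_pulse masks values with boolean indexing, so time_delay must be a numpy
# array; the pulse width and Doppler value here are arbitrary.
import numpy as np

pulse_width = 1e-4                                               # 100 us pulse
time_delay = np.linspace(-1.5 * pulse_width, 1.5 * pulse_width, 401)
doppler_frequency = 5e3                                          # Hz
amb = single_pulse(time_delay, doppler_frequency, pulse_width)   # zero outside |delay| > pulse_width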
def sinc_seq(A = 1.0, tf = 1.0, delta = 0.1): t = -tf dt = 0.001 events = [] v_last = None # Note use of round() to compensate for floating-point arithmetic errors # that lead to inexact results. while t <= tf: v = delta * floor(round(A*sinc(t) / delta, 10)) if v != v_last: events += [(t, v)] v_last = v t += dt return array(events)
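# Usage sketch (added example): build a quantized sinc event sequence with sinc_seq and unpack it
# into separate time and value arrays, e.g. for plotting or as input to the discrete-event Fourier
# transforms further below. The amplitude, extent and quantization step are arbitrary choices.
events = sinc_seq(A=1.0, tf=5.0, delta=0.05)
t_ev, v_ev = events[:, 0], events[:, 1]    # event times and quantized values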
def _get_kernel(self,x1,func_name): #evaluate pixel shift if func_name in func_dic.keys(): func = func_dic[func_name] else: func = lambda kx: sp.sinc(kx)*get_window(func_name,kx.size) i = sp.searchsorted(self.x,x1) dpix = (x1 - self.x[i - 1])/(self.x[i] - self.x[i - 1]) if dpix == 1.0: dpix = 0 assert sp.absolute(dpix) < 1.0 kx = sp.r_[-self.kw:self.kw + 1] + dpix k = func(kx) k = k/sp.sum(k) return k
def lfm_pulse(time_delay, doppler_frequency, pulse_width, bandwidth): """ Calculate the ambiguity function for a linear frequency modulated single pulse. :param time_delay: The time delay for the ambiguity function (seconds). :param doppler_frequency: The Doppler frequency for the ambiguity function (Hz). :param pulse_width: The waveform pulse width (seconds). :param bandwidth: The waveform band width (Hz). :return: The ambiguity function for an LFM pulse. """ ambiguity = abs((1.0 - abs(time_delay) / pulse_width) * sinc(pi * pulse_width * (bandwidth / pulse_width * time_delay + doppler_frequency) * (1.0 - abs(time_delay) / pulse_width)))**2 ambiguity[abs(time_delay) > pulse_width] = 0 return ambiguity
def sincKer(width, even, scale = 2): ''' Return a sinc filter kernel with specified width and scale width - width of the filter (used by the window) even - true if the generated filter should have an even number of points. scale - zeros will be centred around 0 with spacing "scale" ''' if even: numPts = max(2*int((width+1)/2), 2) else: numPts = max(2*int(width/2) + 1, 3) x = r_[0:numPts]-(numPts-1)*0.5 k = scipy.sinc(x/scale) k *= cos(linspace(-pi/4,pi/4,len(k))) ker = numpy.outer(k,k) ker /= sum(ker.ravel()) return ker
def sincKer(width, even, scale=2): ''' Return a sinc filter kernel with specified width and scale width - width of the filter (used by the window) even - true if the generated filter should have an even number of points. scale - zeros will be centred around 0 with spacing "scale" ''' if even: numPts = max(2 * int((width + 1) / 2), 2) else: numPts = max(2 * int(width / 2) + 1, 3) x = r_[0:numPts] - (numPts - 1) * 0.5 k = scipy.sinc(x / scale) k *= cos(linspace(-pi / 4, pi / 4, len(k))) ker = numpy.outer(k, k) ker /= sum(ker.ravel()) return ker
def radial_transform(self, width) : """Return the radial beam Fourier transform function. In the radial direction the beam is just a top hat function, so the Fourier transform is a sinc function. Parameters ---------- width : float The frequency width of the beam function. Returns ------- transform : function Call signature : transform(k_rad). Vectorized radial beam transform as a function of radial wave number. Accepts an array of wave numbers (with units the reciprocal of those of `width`) and returns an array of the same shape. """ factor = width/2.0/sp.pi return lambda k_rad : sp.sinc(k_rad * factor)
def radial_transform(self, width): """Return the radial beam Fourier transform function. In the radial direction the beam is just top hat function, so the Fourier transform is a sinc function. Parameters ---------- width: float The frequency width of the beam function. Returns ------- transform: function Call signature: transform(k_rad). Vectorized radial beam transform as a function of radial wave number. Accepts an array of wave numbers (with units the reciprocal of those of `width`) and returns an array of the same shape. """ factor = width / 2. / sp.pi return lambda k_rad: sp.sinc(k_rad * factor)
def lanczos(N, tau=1): ''' return N-point Lanczos window with width tau ''' return scipy.sinc(linspace(-1, 1, N) / tau)
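# Usage sketch (added example): taper a data segment with the Lanczos window above before an FFT to
# reduce spectral leakage. The random data is a placeholder for a real signal.
import numpy as np

data = np.random.randn(256)
win = lanczos(len(data), tau=1)          # N-point Lanczos (sinc) taper
spectrum = np.fft.rfft(data * win)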
def make_amb(Fsorg,m_up,plen,nlags,nspec=128,winname = 'boxcar'): """ Make the ambiguity function dictionary that holds the lag ambiguity and range ambiguity. Uses a sinc function weighted by a blackman window. Currently only set up for an uncoded pulse. Inputs: Fsorg: A scalar, the original sampling frequency in Hertz. m_up: The upsampled ratio between the original sampling rate and the rate of the ambiguity function up sampling. plen: The length of the pulse in samples at the original sampling frequency. nlags: The number of lags used. Outputs: Wttdict: A dictionary with the keys 'WttAll' which is the full ambiguity function for each lag, 'Wtt' is the max for each lag for plotting, 'Wrange' is the ambiguity in the range with the lag dimension summed, 'Wlag' The ambiguity for the lag, 'Delay' the numpy array for the lag sampling, 'Range' the array for the range sampling and 'WttMatrix' for a matrix that will impart the ambiguity function on a pulses. """ # make the sinc nsamps = sp.floor(8.5*m_up) nsamps = nsamps-(1-sp.mod(nsamps,2)) nvec = sp.arange(-sp.floor(nsamps/2.0),sp.floor(nsamps/2.0)+1) pos_windows = ['boxcar', 'triang', 'blackman', 'hamming', 'hann', 'bartlett', 'flattop', 'parzen', 'bohman', 'blackmanharris', 'nuttall', 'barthann'] curwin = scisig.get_window(winname,nsamps) outsinc = curwin*sp.sinc(nvec/m_up) outsinc = outsinc/sp.sum(outsinc) dt = 1/(Fsorg*m_up) Delay = sp.arange(-(len(nvec)-1),m_up*(nlags+5))*dt t_rng = sp.arange(0,1.5*plen,dt) numdiff = len(Delay)-len(outsinc) outsincpad = sp.pad(outsinc,(0,numdiff),mode='constant',constant_values=(0.0,0.0)) (srng,d2d)=sp.meshgrid(t_rng,Delay) # envelop function envfunc = sp.zeros(d2d.shape) envfunc[(d2d-srng+plen-Delay.min()>=0)&(d2d-srng+plen-Delay.min()<=plen)]=1 envfunc = envfunc/sp.sqrt(envfunc.sum(axis=0).max()) #create the ambiguity function for everything Wtt = sp.zeros((nlags,d2d.shape[0],d2d.shape[1])) cursincrep = sp.tile(outsincpad[:,sp.newaxis],(1,d2d.shape[1])) Wt0 = Wta = cursincrep*envfunc Wt0fft = sp.fft(Wt0,axis=0) for ilag in sp.arange(nlags): cursinc = sp.roll(outsincpad,ilag*m_up) cursincrep = sp.tile(cursinc[:,sp.newaxis],(1,d2d.shape[1])) Wta = cursincrep*envfunc #do fft based convolution, probably best method given sizes Wtafft = scfft.fft(Wta,axis=0) if ilag==0: nmove = len(nvec)-1 else: nmove = len(nvec) Wtt[ilag] = sp.roll(scfft.ifft(Wtafft*sp.conj(Wt0fft),axis=0).real,nmove,axis=0) # make matrix to take # imat = sp.eye(nspec) # tau = sp.arange(-sp.floor(nspec/2.),sp.ceil(nspec/2.))/Fsorg # tauint = Delay # interpmat = spinterp.interp1d(tau,imat,bounds_error=0,axis=0)(tauint) # lagmat = sp.dot(Wtt.sum(axis=2),interpmat) # # triangle window tau = sp.arange(-sp.floor(nspec/2.),sp.ceil(nspec/2.))/Fsorg amb1d = plen-tau amb1d[amb1d<0]=0. amb1d[tau<0]=0. amb1d=amb1d/plen kp = sp.argwhere(amb1d>0).flatten() lagmat = sp.zeros((Wtt.shape[0],nspec)) lagmat.flat[sp.ravel_multi_index((sp.arange(Wtt.shape[0]),kp),lagmat.shape)]=amb1d[kp] Wttdict = {'WttAll':Wtt,'Wtt':Wtt.max(axis=0),'Wrange':Wtt.sum(axis=1),'Wlag':Wtt.sum(axis=2), 'Delay':Delay,'Range':v_C_0*t_rng/2.0,'WttMatrix':lagmat} return Wttdict
def raised(x, beta, T): return pi / (4.0 * T) * sinc(1.0 / (2.0 * beta)) # closed-form raised-cosine value at the removable singularity t = +/- T/(2*beta)
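# Numerical check (added example): raised() above is taken here as the closed-form value of the
# raised-cosine response at the removable singularity t = T/(2*beta); evaluating the general formula
# raised2() just off that point should give nearly the same number.
T, beta = 1.0, 0.35
t_sing = T / (2.0 * beta)
approx = raised2(t_sing + 1e-6, beta, T)   # general formula, slightly off the singular point
exact = raised(t_sing, beta, T)            # closed-form limit
assert abs(approx - exact) < 1e-4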
def periodic_sinc_deft(x, max_w): """ Return a sinc-based Fourier transform of a discrete event sequence x. The sequence x is assumed to define a possibly non-uniformly sampled periodic time-domain signal. This approach is a semi-analytical technique that essentially works by treating the DE sequence as defining a piecewise-constant signal, and treating the FT as the sum of the FTs of the rectangular pulses that make up that piecewise-constant signal. This version of the DEFT assumes a periodic signal, and like the standard DFT constructs the transform from a finite segment of the signal by assuming a periodic extension of the signal. The result is essentially a frequency sampling of the nonperiodic DEFT at frequencies that are multiples of the fundamental frequency. The rect function is defined as rect(t/tau) = 0 if |t| > tau/2 1 if |t| < tau/2 and has Fourier transform F(w) = tau * sinc(w*tau/(2*pi)) Given events e0 = (t0, v0) and e1 = (t1, v1) we treat them as defining a rectangle rect((t - ((t1 - t0)/2 + t0))/|t1 - t0|) with FT F(w) = v0 * exp(-j*w*(t0+t1)/2) * tau * sinc(w*tau/(2*pi)) where tau = |t1 - t0| Parameters ---------- x : event array array to transform, where each event in the array is a 2-tuple (time, value) max_w : maximum radian frequency over which to evaluate the transform Returns ------- z : complex array z = sum[n=0..N] v0 * exp(-1j*w*(t0 + t1)/2) * tau * sinc(w*tau/(2*pi)) where (t0, v0) = x[n] (t1, v1) = x[n+1] tau = t1 - t0 P = max(t) - min(t) w = 2*pi*k/P for k in -max_w*P/(2*pi) .. max_w*P/(2*pi) """ ts = [t for (t,v) in x] P = max(ts) - min(ts) # Period w0 = (2*pi)/P # Fundamental radian frequency T = max(ts) # Signal end point # Construct frequency sample points k_bound = floor(max_w/w0) w = w0*arange(-k_bound, k_bound+1, dtype=complex) z = zeros(len(w), dtype=complex) xf = append(x, [(T, x[0,1])], axis=0) # manage the wraparound for n in range(len(x)): (t0, v0) = xf[n] (t1, v1) = xf[n+1] tau = abs(t1 - t0) z = z + (v0 * exp(-1j*w*(t0 + tau/2)) * tau * sinc(w*tau/(2*pi))) return asarray(zip(w,z/P))
def make_amb(Fsorg, m_up, plen, pulse, nspec=128, winname='boxcar'): """ Make the ambiguity function dictionary that holds the lag ambiguity and range ambiguity. Uses a sinc function weighted by the chosen window. Currently only set up for an uncoded pulse. Args: Fsorg (:obj:`float`): A scalar, the original sampling frequency in Hertz. m_up (:obj:`int`): The upsampled ratio between the original sampling rate and the rate of the ambiguity function up sampling. plen (:obj:`int`): The length of the pulse in samples at the original sampling frequency. pulse (:obj:`ndarray`): The pulse shape; the number of lags used is len(pulse). Returns: Wttdict (:obj:`dict`): A dictionary with the keys 'WttAll' which is the full ambiguity function for each lag, 'Wtt' is the max for each lag for plotting, 'Wrange' is the ambiguity in the range with the lag dimension summed, 'Wlag' The ambiguity for the lag, 'Delay' the numpy array for the lag sampling, 'Range' the array for the range sampling and 'WttMatrix' for a matrix that will impart the ambiguity function on a pulse. """ nspec = int(nspec) nlags = len(pulse) # make the sinc nsamps = sp.floor(8.5 * m_up) nsamps = int(nsamps - (1 - sp.mod(nsamps, 2))) # need to incorporate summation rule vol = 1. nvec = sp.arange(-sp.floor(nsamps / 2.0), sp.floor(nsamps / 2.0) + 1) pos_windows = [ 'boxcar', 'triang', 'blackman', 'hamming', 'hann', 'bartlett', 'flattop', 'parzen', 'bohman', 'blackmanharris', 'nuttall', 'barthann' ] curwin = scisig.get_window(winname, nsamps) # Apply window to the sinc function. This will act as the impulse response of the filter outsinc = curwin * sp.sinc(nvec / m_up) outsinc = outsinc / sp.sum(outsinc) dt = 1 / (Fsorg * m_up) #make delay vector Delay_num = sp.arange(-(len(nvec) - 1), m_up * (nlags + 5)) Delay = Delay_num * dt t_rng = sp.arange(0, 1.5 * plen, dt) if len(t_rng) > 2e4: raise ValueError('The time array is way too large. plen should be in seconds.') numdiff = len(Delay) - len(outsinc) numback = int(nvec.min() / m_up - Delay_num.min()) numfront = numdiff - numback # outsincpad = sp.pad(outsinc,(0,numdiff),mode='constant',constant_values=(0.0,0.0)) outsincpad = sp.pad(outsinc, (numback, numfront), mode='constant', constant_values=(0.0, 0.0)) (d2d, srng) = sp.meshgrid(Delay, t_rng) # envelope function t_p = sp.arange(nlags) / Fsorg envfunc = sp.interp(sp.ravel(srng - d2d), t_p, pulse, left=0., right=0.).reshape(d2d.shape) # envfunc = sp.zeros(d2d.shape) # envfunc[(d2d-srng+plen-Delay.min()>=0)&(d2d-srng+plen-Delay.min()<=plen)]=1 envfunc = envfunc / sp.sqrt(envfunc.sum(axis=0).max()) #create the ambiguity function for everything Wtt = sp.zeros((nlags, d2d.shape[0], d2d.shape[1])) cursincrep = sp.tile(outsincpad[sp.newaxis, :], (len(t_rng), 1)) Wt0 = cursincrep * envfunc Wt0fft = sp.fft(Wt0, axis=1) for ilag in sp.arange(nlags): cursinc = sp.roll(outsincpad, ilag * m_up) cursincrep = sp.tile(cursinc[sp.newaxis, :], (len(t_rng), 1)) Wta = cursincrep * envfunc #do fft based convolution, probably best method given sizes Wtafft = scfft.fft(Wta, axis=1) nmove = len(nvec) - 1 Wtt[ilag] = sp.roll(scfft.ifft(Wtafft * sp.conj(Wt0fft), axis=1).real, nmove, axis=1) # make matrix to take imat = sp.eye(nspec) tau = sp.arange(-sp.floor(nspec / 2.), sp.ceil(nspec / 2.)) / Fsorg tauint = Delay interpmat = spinterp.interp1d(tau, imat, bounds_error=0, axis=0)(tauint) lagmat = sp.dot(Wtt.sum(axis=1), interpmat) W0 = lagmat[0].sum() for ilag in range(nlags): lagmat[ilag] = ((vol + ilag) / (vol * W0)) * lagmat[ilag] Wttdict = { 'WttAll': Wtt, 'Wtt': Wtt.max(axis=0), 'Wrange': Wtt.sum(axis=1), 'Wlag': Wtt.sum(axis=2), 'Delay': Delay, 'Range': v_C_0 * t_rng / 2.0, 'WttMatrix': lagmat } return Wttdict
def trim_sinc(kx): #equivalent to setting window to boxcar return sp.sinc(kx)
def make_amb(Fsorg,m_up,plen,pulse,nspec=128,winname = 'boxcar'): """ Make the ambiguity function dictionary that holds the lag ambiguity and range ambiguity. Uses a sinc function weighted by the chosen window. Currently only set up for an uncoded pulse. Args: Fsorg (:obj:`float`): A scalar, the original sampling frequency in Hertz. m_up (:obj:`int`): The upsampled ratio between the original sampling rate and the rate of the ambiguity function up sampling. plen (:obj:`int`): The length of the pulse in samples at the original sampling frequency. pulse (:obj:`ndarray`): The pulse shape; the number of lags used is len(pulse). Returns: Wttdict (:obj:`dict`): A dictionary with the keys 'WttAll' which is the full ambiguity function for each lag, 'Wtt' is the max for each lag for plotting, 'Wrange' is the ambiguity in the range with the lag dimension summed, 'Wlag' The ambiguity for the lag, 'Delay' the numpy array for the lag sampling, 'Range' the array for the range sampling and 'WttMatrix' for a matrix that will impart the ambiguity function on a pulse. """ nspec = int(nspec) nlags = len(pulse) # make the sinc nsamps = sp.floor(8.5*m_up) nsamps = int(nsamps-(1-sp.mod(nsamps, 2))) # need to incorporate summation rule vol = 1. nvec = sp.arange(-sp.floor(nsamps/2.0), sp.floor(nsamps/2.0)+1) pos_windows = ['boxcar', 'triang', 'blackman', 'hamming', 'hann', 'bartlett', 'flattop', 'parzen', 'bohman', 'blackmanharris', 'nuttall', 'barthann'] curwin = scisig.get_window(winname, nsamps) # Apply window to the sinc function. This will act as the impulse response of the filter outsinc = curwin*sp.sinc(nvec/m_up) outsinc = outsinc/sp.sum(outsinc) dt = 1/(Fsorg*m_up) #make delay vector Delay_num = sp.arange(-(len(nvec)-1),m_up*(nlags+5)) Delay = Delay_num*dt t_rng = sp.arange(0, 1.5*plen, dt) if len(t_rng) > 2e4: raise ValueError('The time array is way too large. plen should be in seconds.') numdiff = len(Delay)-len(outsinc) numback = int(nvec.min()/m_up-Delay_num.min()) numfront = numdiff-numback # outsincpad = sp.pad(outsinc,(0,numdiff),mode='constant',constant_values=(0.0,0.0)) outsincpad = sp.pad(outsinc,(numback, numfront), mode='constant', constant_values=(0.0, 0.0)) (d2d, srng)=sp.meshgrid(Delay, t_rng) # envelope function t_p = sp.arange(nlags)/Fsorg envfunc = sp.interp(sp.ravel(srng-d2d), t_p,pulse, left=0., right=0.).reshape(d2d.shape) # envfunc = sp.zeros(d2d.shape) # envfunc[(d2d-srng+plen-Delay.min()>=0)&(d2d-srng+plen-Delay.min()<=plen)]=1 envfunc = envfunc/sp.sqrt(envfunc.sum(axis=0).max()) #create the ambiguity function for everything Wtt = sp.zeros((nlags, d2d.shape[0], d2d.shape[1])) cursincrep = sp.tile(outsincpad[sp.newaxis, :], (len(t_rng), 1)) Wt0 = cursincrep*envfunc Wt0fft = sp.fft(Wt0, axis=1) for ilag in sp.arange(nlags): cursinc = sp.roll(outsincpad, ilag*m_up) cursincrep = sp.tile(cursinc[sp.newaxis, :], (len(t_rng), 1)) Wta = cursincrep*envfunc #do fft based convolution, probably best method given sizes Wtafft = scfft.fft(Wta, axis=1) nmove = len(nvec)-1 Wtt[ilag] = sp.roll(scfft.ifft(Wtafft*sp.conj(Wt0fft), axis=1).real, nmove, axis=1) # make matrix to take imat = sp.eye(nspec) tau = sp.arange(-sp.floor(nspec/2.), sp.ceil(nspec/2.))/Fsorg tauint = Delay interpmat = spinterp.interp1d(tau, imat, bounds_error=0, axis=0)(tauint) lagmat = sp.dot(Wtt.sum(axis=1), interpmat) W0 = lagmat[0].sum() for ilag in range(nlags): lagmat[ilag] = ((vol+ilag)/(vol*W0))*lagmat[ilag] Wttdict = {'WttAll':Wtt, 'Wtt':Wtt.max(axis=0), 'Wrange':Wtt.sum(axis=1), 'Wlag':Wtt.sum(axis=2), 'Delay':Delay, 'Range':v_C_0*t_rng/2.0, 'WttMatrix':lagmat} return Wttdict
def sinc_deft(x, T, w): """ Return a sinc-based Fourier transform of a discrete event sequence x. The sequence x is assumed to define a possibly non-uniformly sampled time-domain signal. This approach is a semi-analytical technique that essentially works by treating the DE sequence as defining a piecewise-constant signal, and treating the FT as the sum of the FTs of the rectangular pulses that make up that piecewise-constant signal. This version of the DEFT assumes a finite signal duration, with the signal assumed to return to its zero state at the end of that duration (just as we assume that the signal starts at a zero state). The rect function is defined as rect(t/tau) = 0 if |t| > tau/2 1 if |t| < tau/2 and has Fourier transform F(w) = tau * sinc(w*tau/(2*pi)) Given events e0 = (t0, v0) and e1 = (t1, v1) we treat them as defining a rectangle v0 * rect((t - ((t1 - t0)/2 + t0))/|t1 - t0|) with FT F(w) = v0 * exp(-j*w*(t0+t1)/2) * tau * sinc(w*tau/(2*pi)) where tau = |t1 - t0| See McInnes, A. "A Discrete Event Fourier Transform", https://github.com/allanmcinnes/DEFT. Parameters ---------- x : event array array to transform, where each event in the array is a 2-tuple (time, value) T : signal termination time w : array array of radian frequencies over which the transform should be evaluated Returns ------- z : complex array z = sum[n=0..N] v0 * exp(-1j*w*(t0 + t1)/2) * tau * sinc(w*tau/(2*pi)) where (t0, v0) = x[n] (t1, v1) = x[n+1] tau = t1 - t0 """ assert T >= x[-1][0], \ "Termination tag T = {0:g} must be greater".format(T) + \ " than or equal to the tag of the final event in the signal, " + \ "t = {0:g}. ".format(x[-1][0]) + \ "However, T is {0:g} less than t.".format(x[-1][0] - T) z = zeros(len(w), dtype=complex) xf = append(x, [(T, 0.0)], axis=0) # Append a terminating zero event for n in range(len(x)): (t0, v0) = xf[n] (t1, v1) = xf[n+1] tau = abs(t1 - t0) z = z + (v0 * exp(-1j*w*(t0 + tau/2)) * tau * sinc(w*tau/(2*pi))) return asarray(zip(w,z))
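# Usage sketch (added example): transform a small hand-built discrete-event sequence with sinc_deft.
# The (time, value) event layout and the termination time T follow the docstring above. Note that
# the final asarray(zip(...)) assumes Python 2; under Python 3 the zip would need to be wrapped in
# list() to get an array of (frequency, value) pairs.
import numpy as np

events = np.array([(0.0, 0.0), (1.0, 1.0), (2.0, 0.5), (3.0, 0.0)])   # (time, value) pairs
w = 2 * np.pi * np.linspace(0.1, 5.0, 50)                             # radian frequencies
spectrum = sinc_deft(events, T=4.0, w=w)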
def valueCross(self,pars): qso_boost = pars["qso_metal_boost"] qso_evol = [pars['qso_evol_0'],pars['qso_evol_1']] bias_qso = pars["bias_qso"] growth_rate = pars["growth_rate"] beta_qso = growth_rate/bias_qso bias_met = sp.array([pars['bias_'+met] for met in self.met_names]) beta_met = sp.array([pars['beta_'+met] for met in self.met_names]) Lpar = pars["Lpar_cross"] Lper = pars["Lper_cross"] ### Scales if (self.different_drp): drp_met = sp.array([pars['drp_'+met] for met in self.met_names]) drp = sp.outer(sp.ones(self.nd_cross),drp_met) else: drp = pars["drp"] if self.grid: ### Redshift evolution z = self.grid_qso_met[:,:,2] evol = sp.power( self.evolution_growth_factor(z)/self.evolution_growth_factor(self.zref),2. ) evol *= self.evolution_Lya_bias(z,[pars["alpha_lya"]])/self.evolution_Lya_bias(self.zref,[pars["alpha_lya"]]) evol *= self.evolution_QSO_bias(z,qso_evol)/self.evolution_QSO_bias(self.zref,qso_evol) rp_shift = self.grid_qso_met[:,:,0]+drp rt = self.grid_qso_met[:,:,1] r = sp.sqrt(rp_shift**2 + rt**2) mur = rp_shift/r muk = cosmo_model.muk kp = self.k * muk kt = self.k * sp.sqrt(1.-muk**2) ### Correction to linear power-spectrum pk_corr = (1.+0.*muk)*self.pk pk_corr *= sp.sinc(kp*Lpar/2./sp.pi)**2 pk_corr *= sp.sinc(kt*Lper/2./sp.pi)**2 ### Biases b1b2 = qso_boost*bias_qso*bias_met if self.grid: xi_qso_met = sp.zeros(self.grid_qso_met[:,0,0].size) for i in range(self.nmet): pk_full = b1b2[i]*(1. + beta_met[i]*muk**2)*(1. + beta_qso*muk**2)*pk_corr xi_qso_met += cosmo_model.Pk2Xi(r[:,i],mur[:,i],self.k,pk_full,ell_max=self.ell_max)*evol[:,i] else: nbins = list(self.xdmat.values())[0].shape[0] xi_qso_met = sp.zeros(nbins) for i in self.met_names: bias_met = pars["bias_"+i] beta_met = pars["beta_"+i] recalc = beta_met != self.prev_pmet['beta_'+i] or\ growth_rate != self.prev_pmet['growth_rate'] or\ not sp.allclose(qso_evol,self.prev_pmet['qso_evol']) or\ self.prev_pmet['drp'] != drp if recalc: if self.verbose: print("recalculating metal {}".format(i)) self.prev_pmet['beta_'+i] = beta_met self.prev_pmet['growth_rate'] = growth_rate self.prev_pmet['qso_evol'] = qso_evol self.prev_pmet['drp'] = drp z = self.xzeff[i] evol = sp.power( self.evolution_growth_factor(z)/self.evolution_growth_factor(self.zref),2. ) evol *= self.evolution_Lya_bias(z,[pars["alpha_"+i]])/self.evolution_Lya_bias(self.zref,[pars["alpha_"+i]]) evol *= self.evolution_QSO_bias(z,qso_evol)/self.evolution_QSO_bias(self.zref,qso_evol) rp = self.xrp[i] + drp rt = self.xrt[i] r = sp.sqrt(rp**2+rt**2) w=r==0 r[w]=1e-6 mur = rp/r pk_full = (1. + beta_met*muk**2)*(1. + beta_qso*muk**2)*pk_corr self.prev_xi_qso_met[i] = cosmo_model.Pk2Xi(r,mur,self.k,pk_full,ell_max=self.ell_max) self.prev_xi_qso_met[i] = self.xdmat[i].dot(self.prev_xi_qso_met[i]*evol) xi_qso_met += qso_boost*bias_qso*bias_met*self.prev_xi_qso_met[i] return xi_qso_met
def valueAuto(self,pars): bias_lya=pars["bias_lya*(1+beta_lya)"] beta_lya=pars["beta_lya"] bias_lya /= 1+beta_lya if self.templates: bias_met=sp.array([pars['bias_'+met] for met in self.met_names]) beta_met=sp.array([pars['beta_'+met] for met in self.met_names]) amp=sp.zeros([self.nmet,3]) amp[:,0] = bias_met*(1 + (beta_lya+beta_met)/3 + beta_lya*beta_met/5) amp[:,1] = bias_met*(2*(beta_lya+beta_met)/3 + 4*beta_lya*beta_met/7) amp[:,2] = bias_met*8*beta_met*beta_lya/35 amp*=bias_lya xi_lya_met=amp*self.temp_lya_met xi_lya_met=sp.sum(xi_lya_met,axis=(1,2)) amp=sp.zeros([self.nmet,self.nmet,3]) bias_met2 = bias_met*bias_met[None,:] amp[:,:,0] = bias_met2*(1+(beta_met+beta_met[None,:])/3+beta_met*beta_met[None,:]/5) amp[:,:,1] = bias_met2*(2*(beta_met+beta_met[None,:])/3+4*beta_met*beta_met[None,:]/7) amp[:,:,2] = bias_met2*8*beta_met*beta_met[None,:]/35 xi_met_met=amp*self.temp_met_met xi_met_met=sp.sum(xi_met_met,axis=(1,2,3)) else: muk = cosmo_model.muk k = self.k kp = k*muk kt = k*sp.sqrt(1-muk**2) nbins = self.dmat["LYA_"+self.met_names[0]].shape[0] if self.hcds_mets: bias_lls = pars["bias_lls"] beta_lls = pars["beta_lls"] L0_lls = pars["L0_lls"] Flls = sp.sin(kp*L0_lls)/(kp*L0_lls) Lpar_auto = pars["Lpar_auto"] Lper_auto = pars["Lper_auto"] alpha_lya = pars["alpha_lya"] Gpar = sp.sinc(kp*Lpar_auto/2/sp.pi)**2 Gper = sp.sinc(kt*Lper_auto/2/sp.pi)**2 xi_lya_met = sp.zeros(nbins) for met in self.met_names: bias_met = pars['bias_'+met] beta_met = pars['beta_'+met] alpha_met = pars["alpha_"+met] dm = self.dmat["LYA_"+met] recalc = beta_met != self.prev_pmet["beta_"+met]\ or beta_lya != self.prev_pmet["beta_lya"]\ or alpha_lya != self.prev_pmet["alpha_lya"]\ or alpha_met != self.prev_pmet["alpha_"+met] rt = self.auto_rt["LYA_"+met] rp = self.auto_rp["LYA_"+met] zeff = self.auto_zeff["LYA_"+met] r = sp.sqrt(rt**2+rp**2) w = (r==0) r[w] = 1e-6 mur = rp/r if recalc: if self.verbose: print("recalculating ",met) pk = (1+beta_lya*muk**2)*(1+beta_met*muk**2)*self.pk pk *= Gpar*Gper xi = cosmo_model.Pk2Xi(r,mur,self.k,pk,ell_max=self.ell_max) xi *= ((1+zeff)/(1+self.zref))**((alpha_lya-1)*(alpha_met-1)) self.prev_xi_lya_met["LYA_"+met] = self.dmat["LYA_"+met].dot(xi) if self.hcds_mets: recalc = self.prev_pmet["beta_lls"] != beta_lls\ or self.prev_pmet["L0_lls"] != L0_lls if recalc: pk = (1+beta_lls*muk**2)*(1+beta_met*muk**2)*self.pk*Flls pk *= Gpar*Gper xi = cosmo_model.Pk2Xi(r,mur,self.k,pk,ell_max=self.ell_max) xi *= ((1+zeff)/(1+self.zref))**((alpha_lya-1)*(alpha_met-1)) self.prev_xi_dla_met[met] = xi xi_lya_met += bias_lya*bias_met*self.prev_xi_lya_met["LYA_"+met] if self.hcds_mets: xi_lya_met += bias_lls*bias_met*self.prev_xi_dla_met[met] xi_met_met = sp.zeros(nbins) for i,met1 in enumerate(self.met_names): bias_met1 = pars['bias_'+met1] beta_met1 = pars['beta_'+met1] alpha_met1 = pars["alpha_"+met1] for met2 in self.met_names[i:]: rt = self.auto_rt[met1+"_"+met2] rp = self.auto_rp[met1+"_"+met2] zeff = self.auto_zeff[met1+"_"+met2] bias_met2 = pars['bias_'+met2] beta_met2 = pars['beta_'+met2] alpha_met2 = pars["alpha_"+met2] dm = self.dmat[met1+"_"+met2] recalc = beta_met1 != self.prev_pmet["beta_"+met1]\ or beta_met2 != self.prev_pmet["beta_"+met2] if recalc: if self.verbose: print("recalculating ",met1,met2) r = sp.sqrt(rt**2+rp**2) w=r==0 r[w]=1e-6 mur = rp/r pk = (1+beta_met1*muk**2)*(1+beta_met2*muk**2)*self.pk pk *= Gpar*Gper xi = cosmo_model.Pk2Xi(r,mur,self.k,pk,ell_max=self.ell_max) xi *= ((1+zeff)/(1+self.zref))**((alpha_met1-1)*(alpha_met2-1)) self.prev_xi_met_met[met1+"_"+met2] = self.dmat[met1+"_"+met2].dot(xi) xi_met_met += bias_met1*bias_met2*self.prev_xi_met_met[met1+"_"+met2] for i in self.prev_pmet: self.prev_pmet[i] = pars[i] return xi_lya_met + xi_met_met
def distribution3(x, y, x_avr, y_avr, x_scale, y_scale): return sinc((x - x_avr) / x_scale) * sinc((y - y_avr) / y_scale)
def compute_sinc_kernel(acquisition_time, user_time, symmetricization_trick=True, L=None, slice_index=None, n_slices=None, ): """Computes the sinc kernel around given user times (user_time). Parameters ---------- acquisition_time: 1D array of size n_scans acquisition times for the TRs in the underlying experiment (typically 0, TR, 2TR, ..., (n_scans - 1) TR or 0, 1, 2, ..., (n_scans - 1) user_time: 1D array of shape n_user_times the times around which the kernels will be centered (this is the times your want to predict response for) symmetricization_trick: boolean (optional, default True) if true symmetricization trick will be used to reflect the acquisition times about the ordinate axis (this helps the subsequenc sinc-based interpolation) L: int (optional, default None) width of Hanning Window to use in windowing the sinc kernel (this should help preventing the 'teleportation' of artefacts across different TRs, and also make the kernel sparse, thus speeding up the ensuing linear algebra) Returns ------- sinc_kernel: 2D array of shape (len(user_time), 2n_scans - 1) if symmetricization trick has been used or (len(user_time), n_scans) otherwise Raises ------ AssertionError Examples -------- >>> import slice_timing as st >>> from numpy import * >>> at = st.get_acquisition_time(10) >>> ut = st.get_user_time(21, 10, slice_index=9) >>> k = st.compute_sinc_kernel(at, ut) """ # sanitize the times assert len(user_time.shape) == 1 assert len(acquisition_time.shape) == 1 # brag if not slice_index is None: if not n_slices is None: print ("Estimating STC transform (sinc kernel) for slice " "%i/%i...") % (slice_index + 1, n_slices) else: print ("Estimating STC transform (sinc kernel) for slice " "%i") % (slice_index + 1) # symmetricize the acq time if symmetricization_trick: acquisition_time = symmetricized(acquisition_time) # compute time shifts time_shift = np.array([ t - acquisition_time for t in user_time]) # compute kernel sinc_kernel = scipy.sinc(time_shift) # modify the kernel with a Hanning window of width L # around the user times (user_time) if not L is None and L != INFINITY: assert L > 0 sinc_kernel *= hanning_window( time_shift, L) # return computed kernel return scipy.sparse.csr_matrix(sinc_kernel)
def sicifunc(z, a, b, c): si, ci = scipy.special.sici(2.0 * b * (z - a)) #return c*((-1.0+np.cos(2.0*b*(a-z))+2.0*b*(a-z)*si)/(2.0*(b*b)*(z-a))+np.pi/(2.0*b))*b return c * (-scipy.sinc(b * (z - a) / np.pi) * scipy.sin(b * (z - a)) + si + np.pi / 2.0) / np.pi
def Fonction(x, y): z = fabs(sinc(x) * sinc(y)) return z
Lcoh_E = sp.zeros(points) # coherence length as the DC field is varied I_phi = sp.zeros(points) # second harmonic as phi is varied I_theta = sp.zeros(points) # second harmonic as theta is varied I_E = sp.zeros(points) # second harmonic as the DC field is varied # 7.)___________________________________________________________________________________________________________________ for i in range(points): n1_phi = n1_normal.n2(theta, phi_var[i]) n3_phi = n3_normal.n1(theta, phi_var[i]) n1_theta = n1_normal.n2(theta_var[i], phi) n3_theta = n3_normal.n1(theta_var[i], phi) Lcoh_phi[i] = lambd1 * lambd3 / (2 * ((n3_phi - n1_phi) * lambd1)) Lcoh_theta[i] = lambd1 * lambd3 / (2 * ((n3_theta - n1_theta) * lambd1)) I_phi[i] = 8 * (chi3 * E)**2 * omega3**2 * I1**2 * L**2 / ( n**3 * pc.epsilon0 * pc.c**2) * sp.sinc(L / Lcoh_phi[i])**2 I_theta[i] = 8 * (chi3 * E)**2 * omega3**2 * I1**2 * L**2 / (n**3 * pc.epsilon0 * pc.c**2) * \ sp.sinc(L / Lcoh_theta[i])**2 # 8.)___________________________________________________________________________________________________________________ fig = plt.figure() ax1 = fig.add_subplot(2, 1, 1) ax2 = fig.add_subplot(2, 1, 2) ax1.plot(phi_var * 180 / sp.pi, I_phi) ax2.plot(theta_var * 180 / sp.pi, I_theta) # ax1.set_ylim([0, 15]) plt.show()
def getXiCross(self,rp,rt,z,pk_lin,pars): k = self.k if not self.fit_aiso: ap=pars["ap"] at=pars["at"] else: ap=pars["aiso"]*pars["1+epsilon"]*pars["1+epsilon"] at=pars["aiso"]/pars["1+epsilon"] drp=pars["drp"] Lpar=pars["Lpar_cross"] Lper=pars["Lper_cross"] qso_evol = [pars['qso_evol_0'],pars['qso_evol_1']] rp_shift=rp+drp ar=np.sqrt(rt**2*at**2+rp_shift**2*ap**2) mur=rp_shift*ap/ar muk = model.muk kp = k * muk kt = k * np.sqrt(1-muk**2) bias_lya = pars["bias_lya*(1+beta_lya)"]/(1.+pars["beta_lya"]) beta_lya = pars["beta_lya"] ### UV fluctuation if self.uv_fluct: bias_gamma = pars["bias_gamma"] bias_prim = pars["bias_prim"] lambda_uv = pars["lambda_uv"] W = sp.arctan(k*lambda_uv)/(k*lambda_uv) bias_lya_prim = bias_lya + bias_gamma*W/(1+bias_prim*W) beta_lya = bias_lya*beta_lya/bias_lya_prim bias_lya = bias_lya_prim ### LYA-QSO cross correlation bias_qso = pars["bias_qso"] beta_qso = pars["growth_rate"]/bias_qso pk_full = bias_lya*bias_qso*(1+beta_lya*muk**2)*(1+beta_qso*muk**2)*pk_lin ### HCDS-QSO cross correlation if self.lls: bias_lls = pars["bias_lls"] beta_lls = pars["beta_lls"] L0_lls = pars["L0_lls"] F_lls = sp.sinc(kp*L0_lls/sp.pi) pk_full+=bias_lls*F_lls*bias_qso*(1+beta_lls*muk**2)*(1+beta_qso*muk**2)*pk_lin ### Velocity dispersion if (self.velo_gauss): pk_full *= sp.exp( -0.25*(kp*pars['sigma_velo_gauss'])**2 ) if (self.velo_lorentz): pk_full /= np.sqrt(1.+(kp*pars['sigma_velo_lorentz'])**2) ### Peak broadening sigmaNLper = pars["SigmaNL_perp"] sigmaNLpar = sigmaNLper*pars["1+f"] pk_full *= sp.exp( -0.5*( (sigmaNLper*kt)**2 + (sigmaNLpar*kp)**2 ) ) ### Pixel size pk_full *= sp.sinc(kp*Lpar/2./sp.pi)**2 pk_full *= sp.sinc(kt*Lper/2./sp.pi)**2 ### Non-linear correction pk_full *= np.sqrt(self.DNL(self.k,self.muk,self.pk,self.q1_dnl,self.kv_dnl,self.av_dnl,self.bv_dnl,self.kp_dnl,self.dnl_model)) ### Redshift evolution evol = np.power( self.evolution_growth_factor(z)/self.evolution_growth_factor(self.zref),2. ) evol *= self.evolution_Lya_bias(z,[pars["alpha_lya"]])/self.evolution_Lya_bias(self.zref,[pars["alpha_lya"]]) evol *= self.evolution_QSO_bias(z,qso_evol)/self.evolution_QSO_bias(self.zref,qso_evol) return self.Pk2Xi(ar,mur,k,pk_full,ell_max=self.ell_max)*evol
def lanczos(kx): w = 5. out = sp.sinc(kx/w) m = abs(kx) > w out[m] = 0 return out*sp.sinc(kx)
dx = 0.1 t0 = 0.0 tmax = 500.0 dt = 0.1 x = arange(xmin, xmax, dx) temps = arange(t0, tmax, dt) # compute a superposition of two travelling plane harmonic waves (OPPH) in a non-dispersive medium omega0 = 1000.0 domega = 1.0 k0 = 0.5 dk = 0.01 #mescourbes=[[2*Psi0*cos(domega*t - dk*xi)*cos(omega0*t - k0*xi) for xi in x] for t in temps] # compute a wave packet mescourbes = [[ 2 * Psi0 * sinc(domega * t - dk * xi) * cos(omega0 * t - k0 * xi) for xi in x ] for t in temps] # plot the animation fig = pyplot.figure() ax = pyplot.axes(xlim=(0, xmax), ylim=(-1.0, 1.0)) courbe, = ax.plot(x, mescourbes[0]) line_ani = animation.FuncAnimation(fig, traceframe, 100, interval=50, repeat=True) pyplot.show()
def sincSquared(x,A,B,tau,x0): return A*(scipy.sinc(tau * 2*scipy.pi * (x-x0) ))**2 / (2*scipy.pi) + B
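# Usage sketch (added example): fit the sinc-squared model above to noisy synthetic data with
# scipy.optimize.curve_fit. The parameter values and noise level are arbitrary; p0 provides rough
# starting guesses for A, B, tau and x0.
import numpy as np
from scipy.optimize import curve_fit

x = np.linspace(-5.0, 5.0, 200)
y = sincSquared(x, 2.0, 0.1, 0.3, 0.5) + 0.01 * np.random.randn(x.size)
popt, pcov = curve_fit(sincSquared, x, y, p0=[1.0, 0.0, 0.2, 0.0])
A_fit, B_fit, tau_fit, x0_fit = popt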
epsilon = 1e-5 numPrime0 = (function(x0+epsilon)-function(x0))/epsilon xold = x0 xnew = xold - function(xold)/numPrime0 while abs(xold - xnew) > tol: xold = xnew numPrimeNew = (function(xnew+epsilon)-function(xnew))/epsilon xnew = xold - function(xold)/numPrimeNew return xnew func = lambda x: x**2 -1 fPrime = lambda x: 2*x func1 = lambda x: sp.cos(x) func2 = lambda x: sp.sin(1/x)*x**2 func3 = lambda x: sp.sinc(x) -x func1Prime = lambda x: -sp.sin(x) func2Prime = lambda x: 2*x*sp.sin(1/x) - sp.cos(1/x) func3Prime = lambda x: sp.cos(sp.pi*x)/x - sp.sin(sp.pi*x)/(sp.pi*x**2) - 1 # Problem 2 funcProb2 = lambda x: x**(1/3) # print myNewNewton(funcProb2, .5) # This will diverge as can easily be seen with this analytical derivation: # x_n+1 = x_n - (x_n**1/3)/(x_n**(-2/3)/3) # x_n+1 = x_n - 3*x_n # x_n+1 = -2*x_n // This diverges!