Example #1
    def __init__(self,
                 k,
                 nu=None,
                 to_do=None,
                 param_mat=None,
                 low_extrap=None,
                 high_extrap=None,
                 n_pad=None,
                 verbose=False):
        """ inputs:
				* k grid
				* the to_do list: e.g. one_loop density density , bias terms, ...
				* low_extrap is the call to extrapolate the power spectrum to lower k-values,
					this helps with edge effects
				* n_pad is the number of zeros to add to both ends of the array. This helps with
					edge effects.
				* verbose is to turn on verbose settings.
		"""

        # if no to_do list is given, default to fastpt_simple SPT case
        if to_do is None:
            if verbose:
                print(
                    'Note: You are using an earlier call structure for FASTPT. Your code will still run correctly, calling FASTPT_simple. See user manual.'
                )
            if nu is None:  # give a warning if nu=None that a default value is being used.
                print(
                    'WARNING: No value for nu is given. FASTPT_simple is being called with a default of nu=-2'
                )
                nu = -2  # this is the default value for P22+P13 and bias calculation
            self.pt_simple = fastpt_simple.FASTPT(k,
                                                  nu,
                                                  param_mat=param_mat,
                                                  low_extrap=low_extrap,
                                                  high_extrap=high_extrap,
                                                  n_pad=n_pad,
                                                  verbose=verbose)
            return None
        # Exit initialization here, since fastpt_simple performs the various checks on the k grid and does extrapolation.

        # check for log spacing
        print('Initializing k-grid quantities...')
        dk = np.diff(np.log(k))
        # dk_test=np.ones_like(dk)*dk[0]
        delta_L = (log(k[-1]) - log(k[0])) / (k.size - 1)
        dk_test = np.ones_like(dk) * delta_L

        log_sample_test = 'ERROR! FASTPT will not work if your input (k, Pk) values are not sampled evenly in log space!'
        np.testing.assert_array_almost_equal(dk,
                                             dk_test,
                                             decimal=4,
                                             err_msg=log_sample_test,
                                             verbose=False)

        if verbose:
            print('the minimum and maximum input log10(k) are:',
                  np.min(np.log10(k)), np.max(np.log10(k)))
            print('the grid spacing Delta log(k) is',
                  (log(np.max(k)) - log(np.min(k))) / (k.size - 1))
            print('the number of input k points is', k.size)
            print('the power spectrum is extrapolated to log10(k_min)=',
                  low_extrap)
            print('the power spectrum is extrapolated to log10(k_max)=',
                  high_extrap)
            print('the power spectrum has ', n_pad,
                  ' zeros added to both ends of the power spectrum')

        self.k_original = k
        self.extrap = False
        if low_extrap is not None or high_extrap is not None:
            self.EK = k_extend(k, low_extrap, high_extrap)
            k = self.EK.extrap_k()
            self.extrap = True

        self.low_extrap = low_extrap
        self.high_extrap = high_extrap

        self.k_old = k

        # print(self.k_old.size, 'k size')
        # size of input array must be an even number
        if k.size % 2 != 0:
            raise ValueError(
                'Input array must contain an even number of elements.')
        # TODO: could we force the extrapolation to add an element if we need one more? How do we prevent the
        # extrapolation from giving us an odd number of elements? Is that hard-coded into extrap? Or just trim
        # the lowest k value if there is an odd number and no extrapolation is requested.

        if n_pad is not None:

            self.id_pad = np.arange(k.size) + n_pad
            d_logk = delta_L
            k_pad = np.log(k[0]) - np.arange(1, n_pad + 1) * d_logk
            k_pad = np.exp(k_pad)
            k_left = k_pad[::-1]

            k_pad = np.log(k[-1]) + np.arange(1, n_pad + 1) * d_logk
            k_right = np.exp(k_pad)
            k = np.hstack((k_left, k, k_right))
            n_pad_check = int(np.log(2) / delta_L) + 1
            if n_pad < n_pad_check:
                print('*** Warning ***')
                print(
                    'You should consider increasing your zero padding to at least ',
                    n_pad_check)
                print(
                    'to ensure that the minimum k_output is > 2k_min in the FASTPT universe.'
                )
                print('k_min in the FASTPT universe is ', k[0],
                      ' while k_min_input is ', self.k_old[0])

        self.k = k
        self.k_size = k.size
        # self.scalar_nu=-2
        self.N = k.size

        # define eta_m and eta_n=eta_m
        omega = 2 * pi / (float(self.N) * delta_L)
        self.m = np.arange(-self.N // 2, self.N // 2 + 1)
        self.eta_m = omega * self.m

        self.verbose = verbose
        self.n_pad = n_pad

        # define l and tau_l
        self.n_l = self.m.size + self.m.size - 1
        self.l = np.arange(-self.n_l // 2 + 1, self.n_l // 2 + 1)
        self.tau_l = omega * self.l

        self.dd_do = False
        self.dd_bias_do = False
        self.IA_tt_do = False
        self.IA_ta_do = False
        self.IA_mix_do = False
        self.OV_do = False
        self.kPol_do = False
        self.RSD_do = False

        for entry in to_do:  # convert to_do list to instructions for FAST-PT initialization
            if entry == 'one_loop_dd':
                self.dd_do = True
                continue
            elif entry == 'dd_bias':
                self.dd_do = True
                self.dd_bias_do = True
                continue
            elif entry == 'IA_all' or entry == 'IA':
                self.IA_tt_do = True
                self.IA_ta_do = True
                self.IA_mix_do = True
                continue
            elif entry == 'IA_tt':
                self.IA_tt_do = True
                continue
            elif entry == 'IA_ta':
                self.IA_ta_do = True
                continue
            elif entry == 'IA_mix':
                self.IA_mix_do = True
                continue
            elif entry == 'OV':
                self.OV_do = True
                continue
            elif entry == 'kPol':
                self.kPol_do = True
                continue
            elif entry == 'RSD':
                self.RSD_do = True
                continue
            elif entry == 'IRres':
                self.dd_do = True
                continue
            elif entry == 'all' or entry == 'everything':
                self.dd_do = True
                self.dd_bias_do = True
                self.IA_tt_do = True
                self.IA_ta_do = True
                self.IA_mix_do = True
                self.OV_do = True
                self.kPol_do = True
                self.RSD_do = True
                continue
            else:
                raise ValueError('FAST-PT does not recognize "' + entry +
                                 '" in the to_do list.')

        ### INITIALIZATION of k-grid quantities ###
        if self.dd_do:
            nu = -2
            # parameter matrix for 1-loop calculations
            p_mat = np.array([[0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 4, 0],
                              [2, -2, 2, 0], [1, -1, 1, 0], [1, -1, 3, 0],
                              [2, -2, 0, 1]])

            p_mat_lpt = np.array([[0, 0, 0, 0], [0, 0, 2, 0], [2, -2, 2, 0],
                                  [1, -1, 1, 0], [1, -1, 3, 0], [0, 0, 4, 0],
                                  [2, -2, 0, 1]])

            self.X_spt = scalar_stuff(p_mat, nu, self.N, self.m, self.eta_m,
                                      self.l, self.tau_l)
            self.X_lpt = scalar_stuff(p_mat_lpt, nu, self.N, self.m,
                                      self.eta_m, self.l, self.tau_l)

        if self.IA_tt_do:
            hE_tab, hB_tab = IA_tt()
            p_mat_E = hE_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_B = hB_tab[:, [0, 1, 5, 6, 7, 8, 9]]

            self.X_IA_E = tensor_stuff(p_mat_E, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)
            self.X_IA_B = tensor_stuff(p_mat_B, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)

        if self.IA_mix_do:
            IA_A_tab = IA_A()
            IA_DEE_tab = IA_DEE()
            IA_DBB_tab = IA_DBB()
            p_mat_A = IA_A_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_DEE = IA_DEE_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_DBB = IA_DBB_tab[:, [0, 1, 5, 6, 7, 8, 9]]

            self.X_IA_A = tensor_stuff(p_mat_A, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)
            self.X_IA_DEE = tensor_stuff(p_mat_DEE, self.N, self.m, self.eta_m,
                                         self.l, self.tau_l)
            self.X_IA_DBB = tensor_stuff(p_mat_DBB, self.N, self.m, self.eta_m,
                                         self.l, self.tau_l)

        if self.IA_ta_do:
            IA_deltaE1_tab = IA_deltaE1()
            IA_0E0E_tab = IA_0E0E()
            IA_0B0B_tab = IA_0B0B()
            p_mat_deltaE1 = IA_deltaE1_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_0E0E = IA_0E0E_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_0B0B = IA_0B0B_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_IA_deltaE1 = tensor_stuff(p_mat_deltaE1, self.N, self.m,
                                             self.eta_m, self.l, self.tau_l)
            self.X_IA_0E0E = tensor_stuff(p_mat_0E0E, self.N, self.m,
                                          self.eta_m, self.l, self.tau_l)
            self.X_IA_0B0B = tensor_stuff(p_mat_0B0B, self.N, self.m,
                                          self.eta_m, self.l, self.tau_l)

        if self.OV_do:
            # For OV, we can use two different values for
            # nu1=0 and nu2=-2

            OV_tab = OV()
            p_mat = OV_tab[:, [0, 1, 5, 6, 7, 8, 9]]

            self.X_OV = tensor_stuff(p_mat, self.N, self.m, self.eta_m, self.l,
                                     self.tau_l)

        if self.kPol_do:
            tab1, tab2, tab3 = kPol()
            p_mat = tab1[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_kP1 = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                      self.l, self.tau_l)

            p_mat = tab2[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_kP2 = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                      self.l, self.tau_l)

            p_mat = tab3[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_kP3 = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                      self.l, self.tau_l)

        if self.RSD_do:
            tabA, self.A_coeff = RSDA()
            p_mat = tabA[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_RSDA = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)

            tabB, self.B_coeff = RSDB()
            p_mat = tabB[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_RSDB = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)
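
A minimal usage sketch for the constructor above. The fastpt import path, the toy power spectrum, and the one_loop_dd call with its return layout are assumptions inferred from the to_do naming, not guaranteed by this snippet:

import numpy as np
from fastpt import FASTPT  # assumed import path

# evenly log-spaced grid with an even number of points, as the constructor requires
k = np.logspace(-3, 1, 200)
P = 2.0e4 * k / (1.0 + (k / 0.1)**3)  # toy stand-in for a real linear P(k)

fpt = FASTPT(k, to_do=['one_loop_dd'],
             low_extrap=-5, high_extrap=3,
             n_pad=int(0.5 * len(k)), verbose=True)

# assumed: the 'one_loop_dd' to_do entry exposes a method of the same name,
# whose first return element is the one-loop correction on the input grid
P_1loop = fpt.one_loop_dd(P, C_window=0.75)[0]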
Example #2
	
	sig_ff = filtfilt(B, A, P_in, padlen=200)

	return sig_ff

if __name__=="__main__":

	d=np.loadtxt('Pk_Planck15.dat')
	k=d[:,0]; P0=d[:,1]

	import copy
	test=copy.deepcopy(P0)
	
	low_extrap=-4
	high_extrap=5
	EK=k_extend(k,low_extrap,high_extrap)
	k=EK.extrap_k()
	
	
	P0=EK.extrap_P_low(P0)	
	P0=EK.extrap_P_high(P0)
	
	
	P1=filter_highk(k,P0,1,5)
	P2=filter_lowk(k,P0,.01,.05)
	
	k,P1=EK.PK_orginal(P1)
	k,P2=EK.PK_orginal(P2)
	k,P0=EK.PK_orginal(P0)

	import matplotlib.pyplot as plt
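
	# The snippet stops right after importing matplotlib. A minimal plotting continuation is
	# sketched below, assuming only that P0 is the input spectrum and P1, P2 are its high-k and
	# low-k filtered versions from above; labels and units are illustrative.
	plt.loglog(k, P0, label='P0 (input)')
	plt.loglog(k, P1, '--', label='P1 (high-k filtered)')
	plt.loglog(k, P2, ':', label='P2 (low-k filtered)')
	plt.xlabel('k [h/Mpc]')
	plt.ylabel('P(k)')
	plt.legend()
	plt.show()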
Example #3
	def __init__(self,k,nu=None,to_do=None,param_mat=None,low_extrap=None,high_extrap=None,n_pad=None,verbose=False):

		''' inputs:
				* k grid
				* the to_do list, e.g. one-loop density-density, bias terms, ...
				* low_extrap is the log10(k) value down to which the power spectrum is extrapolated;
					this helps with edge effects
				* high_extrap is the corresponding log10(k) value for the high-k extrapolation
				* n_pad is the number of zeros to add to both ends of the array. This also helps with
					edge effects.
				* verbose turns on verbose output.
		'''
		
		# if no to_do list is given, default to fastpt_simple SPT case
		if (to_do is None): 
			if (verbose):
				print('Note: You are using an earlier call structure for FASTPT. Your code will still run correctly, calling FASTPT_simple. See user manual.')
			if (nu is None):# give a warning if nu=None that a default value is being used.
				print('WARNING: No value for nu is given. FASTPT_simple is being called with a default of nu=-2')
				nu=-2 #this is the default value for P22+P13 and bias calculation
			self.pt_simple=fastpt_simple.FASTPT(k,nu,param_mat=param_mat,low_extrap=low_extrap,high_extrap=high_extrap,n_pad=n_pad,verbose=verbose)
			return None
			# Exit initialization here, since fastpt_simple performs the various checks on the k grid and does extrapolation.
		
		# check for log spacing
		print('Initializing k-grid quantities...')
		dk=np.diff(np.log(k))
		#dk_test=np.ones_like(dk)*dk[0]
		delta_L=(log(k[-1])-log(k[0]))/(k.size-1)
		dk_test=np.ones_like(dk)*delta_L
		
		log_sample_test='ERROR! FASTPT will not work if your input (k, Pk) values are not sampled evenly in log space!'
		np.testing.assert_array_almost_equal(dk, dk_test, decimal=4, err_msg=log_sample_test, verbose=False)

		if (verbose):
			print('the minimum and maximum input log10(k) are:', np.min(np.log10(k)),np.max(np.log10(k)))
			print('the grid spacing Delta log(k) is', (log(np.max(k))-log(np.min(k)))/(k.size-1))
			print('the number of input k points is', k.size)
			print('the power spectrum is extrapolated to log10(k_min)=', low_extrap)
			print('the power spectrum is extrapolated to log10(k_max)=', high_extrap)
			print('the power spectrum has ', n_pad,' zeros added to both ends of the power spectrum')
	
		
		
		self.k_original=k
		self.extrap=False		
		if (low_extrap is not None or high_extrap is not None):
			self.EK=k_extend(k,low_extrap,high_extrap)
			k=self.EK.extrap_k()
			self.extrap=True
			
		self.low_extrap=low_extrap
		self.high_extrap=high_extrap
	
		self.k_old=k
	
		#print(self.k_old.size, 'k size')
		# size of input array must be an even number 
		if (k.size % 2 != 0):
			raise ValueError('Input array must contain an even number of elements.')			
		
		if n_pad is not None:
			
			self.id_pad=np.arange(k.size)+n_pad
			d_logk=delta_L 
			k_pad=np.log(k[0])-np.arange(1,n_pad+1)*d_logk
			k_pad=np.exp(k_pad)
			k_left=k_pad[::-1]
			
			k_pad=np.log(k[-1])+np.arange(1,n_pad+1)*d_logk
			k_right=np.exp(k_pad)
			k=np.hstack((k_left,k,k_right))
			n_pad_check=int(np.log(2)/delta_L) +1
			if (n_pad < n_pad_check): 
				print('*** Warning ***')
				print('You should consider increasing your zero padding to at least ', n_pad_check)
				print('to ensure that the minimum k_output is > 2k_min in the FASTPT universe.')
				print('k_min in the FASTPT universe is ', k[0], ' while k_min_input is ', self.k_old[0])		  
		
		
					  
		self.k=k
		self.k_size=k.size
		#self.scalar_nu=-2
		self.N=k.size
		
		# define eta_m and eta_n=eta_m
		omega=2*pi/(float(self.N)*delta_L)
		self.m=np.arange(-self.N//2,self.N//2+1) 
		self.eta_m=omega*self.m
		
		self.verbose=verbose
		self.n_pad=n_pad
		
		# define l and tau_l
		self.n_l=self.m.size + self.m.size - 1
		self.l=np.arange(-self.n_l//2+1,self.n_l//2+1)
		self.tau_l=omega*self.l
		
		self.dd_do=False
		self.dd_bias_do=False
		self.IA_tt_do=False
		self.IA_ta_do=False
		self.IA_mix_do=False	
		self.OV_do=False
		self.kPol_do=False
		self.RSD_do=False	
		
		for entry in to_do: #convert to_do list to instructions for FAST-PT initialization
			if entry=='one_loop_dd':
				self.dd_do=True
				continue
			elif entry=='dd_bias':
				self.dd_do=True
				self.dd_bias_do=True
				continue
			elif entry=='IA_all' or entry=='IA':
				self.IA_tt_do=True
				self.IA_ta_do=True
				self.IA_mix_do=True
				continue		
			elif entry=='IA_tt':
				self.IA_tt_do=True
				continue
			elif entry=='IA_ta':
				self.IA_ta_do=True
				continue		
			elif entry=='IA_mix':
				self.IA_mix_do=True
				continue
			elif entry=='OV':
				self.OV_do=True
				continue
			elif entry=='kPol':
				self.kPol_do=True
				continue									
			elif entry=='RSD':
				self.RSD_do=True
				continue
			elif entry=='sig4':
				self.dd_do=True
				continue
			elif entry=='all' or entry=='everything':
				self.dd_do=True
				self.dd_bias_do=True
				self.IA_tt_do=True
				self.IA_ta_do=True
				self.IA_mix_do=True
				self.OV_do=True
				self.kPol_do=True
				self.RSD_do=True
				continue
			else:
				raise ValueError('FAST-PT does not recognize "'+entry+'" in the to_do list.') 
		
		### INITIALIZATION of k-grid quantities ###
		if self.dd_do:
			nu=-2
			# parameter matrix for 1-loop calculations 
			p_mat=np.array([[0,0,0,0],[0,0,2,0],[0,0,4,0],[2,-2,2,0],\
						[1,-1,1,0],[1,-1,3,0],[2,-2,0,1] ])

			self.X_spt=scalar_stuff(p_mat,nu,self.N,self.m,self.eta_m,self.l,self.tau_l)
		
		if self.IA_tt_do:
			hE_tab,hB_tab=IA_tt()
			p_mat_E=hE_tab[:,[0,1,5,6,7,8,9]]
			p_mat_B=hB_tab[:,[0,1,5,6,7,8,9]]

			self.X_IA_E=tensor_stuff(p_mat_E,self.N,self.m,self.eta_m,self.l,self.tau_l)
			self.X_IA_B=tensor_stuff(p_mat_B,self.N,self.m,self.eta_m,self.l,self.tau_l)

		if self.IA_mix_do:
			IA_A_tab = IA_A()
			IA_DEE_tab = IA_DEE()
			IA_DBB_tab = IA_DBB()
			p_mat_A=IA_A_tab[:,[0,1,5,6,7,8,9]]
			p_mat_DEE=IA_DEE_tab[:,[0,1,5,6,7,8,9]]
			p_mat_DBB=IA_DBB_tab[:,[0,1,5,6,7,8,9]]

			self.X_IA_A=tensor_stuff(p_mat_A,self.N,self.m,self.eta_m,self.l,self.tau_l)
			self.X_IA_DEE=tensor_stuff(p_mat_DEE,self.N,self.m,self.eta_m,self.l,self.tau_l)
			self.X_IA_DBB=tensor_stuff(p_mat_DBB,self.N,self.m,self.eta_m,self.l,self.tau_l)

		if self.IA_ta_do:
			IA_deltaE1_tab = IA_deltaE1()
			IA_0E0E_tab = IA_0E0E()
			IA_0B0B_tab = IA_0B0B()
			p_mat_deltaE1=IA_deltaE1_tab[:,[0,1,5,6,7,8,9]]
			p_mat_0E0E=IA_0E0E_tab[:,[0,1,5,6,7,8,9]]
			p_mat_0B0B=IA_0B0B_tab[:,[0,1,5,6,7,8,9]]
			self.X_IA_deltaE1=tensor_stuff(p_mat_deltaE1,self.N,self.m,self.eta_m,self.l,self.tau_l)
			self.X_IA_0E0E=tensor_stuff(p_mat_0E0E,self.N,self.m,self.eta_m,self.l,self.tau_l)
			self.X_IA_0B0B=tensor_stuff(p_mat_0B0B,self.N,self.m,self.eta_m,self.l,self.tau_l)

		if self.OV_do:
			# For OV, we can use two different values for 
			# nu1=0 and nu2=-2 
			
			OV_tab=OV()
			p_mat=OV_tab[:,[0,1,5,6,7,8,9]]

			self.X_OV=tensor_stuff(p_mat,self.N,self.m,self.eta_m,self.l,self.tau_l)
			
		if self.kPol_do:
					
			tab1,tab2,tab3=kPol()
			p_mat=tab1[:,[0,1,5,6,7,8,9]]
			self.X_kP1=tensor_stuff(p_mat,self.N,self.m,self.eta_m,self.l,self.tau_l)
							
			p_mat=tab2[:,[0,1,5,6,7,8,9]]
			self.X_kP2=tensor_stuff(p_mat,self.N,self.m,self.eta_m,self.l,self.tau_l)
							
			p_mat=tab3[:,[0,1,5,6,7,8,9]]
			self.X_kP3=tensor_stuff(p_mat,self.N,self.m,self.eta_m,self.l,self.tau_l)

		if self.RSD_do:
			
			tabA,self.A_coeff=RSDA()
			p_mat=tabA[:,[0,1,5,6,7,8,9]]
			self.X_RSDA=tensor_stuff(p_mat,self.N,self.m,self.eta_m,self.l,self.tau_l)	
			
			tabB,self.B_coeff=RSDB()
			p_mat=tabB[:,[0,1,5,6,7,8,9]]
			self.X_RSDB=tensor_stuff(p_mat,self.N,self.m,self.eta_m,self.l,self.tau_l)	
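
The constructor above asserts that k is evenly sampled in log space; a grid read straight from a Boltzmann-code output usually is not. A small resampling sketch is below; the spline interpolation in log-log space and the 2048-point grid size are illustrative choices, and it assumes P > 0:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

def to_log_grid(k_in, P_in, n_points=2048):
	# resample (k_in, P_in) onto an evenly log-spaced grid with an even number of points
	if n_points % 2 != 0:
		n_points += 1  # FAST-PT requires an even number of elements
	k_out = np.logspace(np.log10(k_in[0]), np.log10(k_in[-1]), n_points)
	spline = InterpolatedUnivariateSpline(np.log(k_in), np.log(P_in))
	return k_out, np.exp(spline(np.log(k_out)))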
Example #4
    def __init__(self,
                 k,
                 nu=None,
                 to_do=None,
                 param_mat=None,
                 low_extrap=None,
                 high_extrap=None,
                 n_pad=None,
                 verbose=False):
        ''' inputs:
				* k grid
				* the to_do list, e.g. one-loop density-density, bias terms, ...
				* low_extrap is the log10(k) value down to which the power spectrum is extrapolated;
					this helps with edge effects
				* high_extrap is the corresponding log10(k) value for the high-k extrapolation
				* n_pad is the number of zeros to add to both ends of the array. This also helps with
					edge effects.
				* verbose turns on verbose output.
		'''

        # if no to_do list is given, default to fastpt_simple SPT case
        if (to_do is None):
            # set the to_do list to nothing
            # raise an error if nu is None
            if (nu is None):
                raise ValueError(
                    'nu is set to None; you need to specify a numerical value for FASTPT_simple.'
                )

            to_do = []
            if (verbose):
                print(
                    'Note: You are using an earlier call structure for FASTPT. Your code will still run correctly, calling FASTPT_simple. See user manual.'
                )

            self.pt_simple = fastpt_simple.FASTPT(k,
                                                  nu,
                                                  param_mat=param_mat,
                                                  low_extrap=low_extrap,
                                                  high_extrap=high_extrap,
                                                  n_pad=n_pad,
                                                  verbose=verbose)

        # check for log spacing
        dk = np.diff(np.log(k))
        dk_test = np.ones_like(dk) * dk[0]

        log_sample_test = 'ERROR! FASTPT will not work if your k vector is not sampled evenly in log space! \
		You could use the included interpolation routine if you like.'

        np.testing.assert_array_almost_equal(dk,
                                             dk_test,
                                             decimal=4,
                                             err_msg=log_sample_test,
                                             verbose=False)

        if (verbose):
            print('the minimum and maximum input log10(k) are:',
                  np.min(np.log10(k)), np.max(np.log10(k)))
            print('the grid spacing Delta log(k) is',
                  (log(np.max(k)) - log(np.min(k))) / (k.size - 1))
            print('the number of input k points is', k.size)
            print('the power spectrum is extrapolated to log10(k_min)=',
                  low_extrap)
            print('the power spectrum is extrapolated to log10(k_max)=',
                  high_extrap)
            print('the power spectrum has ', n_pad,
                  ' zeros added to both ends of the power spectrum')

        self.k_original = k
        self.extrap = False
        if (low_extrap is not None or high_extrap is not None):
            self.EK = k_extend(k, low_extrap, high_extrap)
            k = self.EK.extrap_k()
            self.extrap = True

        self.low_extrap = low_extrap
        self.high_extrap = high_extrap

        self.k_old = k

        #print(self.k_old.size, 'k size')
        # size of input array must be an even number
        if (k.size % 2 != 0):
            raise ValueError(
                'Input array must contain an even number of elements.')

        delta_L = (log(np.max(k)) - log(np.min(k))) / (
            k.size - 1
        )  # need to put in a check to make sure that it is log sampled

        if n_pad is not None:

            self.id_pad = np.arange(k.size) + n_pad
            d_logk = delta_L
            k_pad = np.log(k[0]) - np.arange(1, n_pad + 1) * d_logk
            k_pad = np.exp(k_pad)
            k_left = k_pad[::-1]

            k_pad = np.log(k[-1]) + np.arange(1, n_pad + 1) * d_logk
            k_right = np.exp(k_pad)
            k = np.hstack((k_left, k, k_right))
            n_pad_check = int(np.log(2) / delta_L) + 1
            if (n_pad < n_pad_check):
                print(
                    'Warning: you should consider increasing your zero padding to at least',
                    n_pad_check)
                print('so that you ensure that k_min_input > 2*k_min in the FASTPT universe.')
                print('k_min in the FASTPT universe is', k[0],
                      'while k_min_input is', self.k_old[0])

        self.k = k
        self.k_size = k.size
        #self.scalar_nu=-2
        self.N = k.size

        # define eta_m and eta_n=eta_m
        omega = 2 * pi / (float(self.N) * delta_L)
        self.m = np.arange(-self.N // 2, self.N // 2 + 1)
        self.eta_m = omega * self.m

        self.verbose = verbose
        self.n_pad = n_pad

        # define l and tau_l
        self.n_l = self.m.size + self.m.size - 1
        self.l = np.arange(-self.n_l // 2 + 1, self.n_l // 2 + 1)
        self.tau_l = omega * self.l

        self.dd_do = False
        self.dd_bias_do = False
        self.IA_tt_do = False
        self.IA_ta_do = False
        self.IA_mix_do = False
        self.OV_do = False
        self.kPol_do = False
        self.RSD_do = False

        for entry in to_do:  #convert to_do list to instructions for FAST-PT initialization
            if entry == 'one_loop_dd':
                self.dd_do = True
                continue
            elif entry == 'dd_bias':
                self.dd_do = True
                self.dd_bias_do = True
                continue
            elif entry == 'IA_all' or entry == 'IA':
                self.IA_tt_do = True
                self.IA_ta_do = True
                self.IA_mix_do = True
                continue
            elif entry == 'IA_tt':
                self.IA_tt_do = True
                continue
            elif entry == 'IA_ta':
                self.IA_ta_do = True
                continue
            elif entry == 'IA_mix':
                self.IA_mix_do = True
                continue
            elif entry == 'OV':
                self.OV_do = True
                continue
            elif entry == 'kPol':
                self.kPol_do = True
                continue
            elif entry == 'RSD':
                self.RSD_do = True
                continue
            elif entry == 'all' or entry == 'everything':
                self.dd_do = True
                self.dd_bias_do = True
                self.IA_tt_do = True
                self.IA_ta_do = True
                self.IA_mix_do = True
                self.OV_do = True
                self.kPol_do = True
                self.RSD_do = True
                continue
            else:
                raise ValueError('FAST-PT does not recognize "' + entry +
                                 '" in the to_do list.')

        ### INITIALIZATION of k-grid quantities ###
        if self.dd_do:
            nu = -2
            # parameter matrix for 1-loop calculations
            p_mat=np.array([[0,0,0,0],[0,0,2,0],[0,0,4,0],[2,-2,2,0],\
               [1,-1,1,0],[1,-1,3,0],[2,-2,0,1] ])

            self.X_spt = scalar_stuff(p_mat, nu, self.N, self.m, self.eta_m,
                                      self.l, self.tau_l)

        if self.IA_tt_do:
            hE_tab, hB_tab = IA_tt()
            p_mat_E = hE_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_B = hB_tab[:, [0, 1, 5, 6, 7, 8, 9]]

            self.X_IA_E = tensor_stuff(p_mat_E, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)
            self.X_IA_B = tensor_stuff(p_mat_B, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)

        if self.IA_mix_do:
            IA_A_tab = IA_A()
            IA_DEE_tab = IA_DEE()
            IA_DBB_tab = IA_DBB()
            p_mat_A = IA_A_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_DEE = IA_DEE_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_DBB = IA_DBB_tab[:, [0, 1, 5, 6, 7, 8, 9]]

            self.X_IA_A = tensor_stuff(p_mat_A, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)
            self.X_IA_DEE = tensor_stuff(p_mat_DEE, self.N, self.m, self.eta_m,
                                         self.l, self.tau_l)
            self.X_IA_DBB = tensor_stuff(p_mat_DBB, self.N, self.m, self.eta_m,
                                         self.l, self.tau_l)

        if self.IA_ta_do:
            IA_deltaE1_tab = IA_deltaE1()
            IA_0E0E_tab = IA_0E0E()
            IA_0B0B_tab = IA_0B0B()
            p_mat_deltaE1 = IA_deltaE1_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_0E0E = IA_0E0E_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            p_mat_0B0B = IA_0B0B_tab[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_IA_deltaE1 = tensor_stuff(p_mat_deltaE1, self.N, self.m,
                                             self.eta_m, self.l, self.tau_l)
            self.X_IA_0E0E = tensor_stuff(p_mat_0E0E, self.N, self.m,
                                          self.eta_m, self.l, self.tau_l)
            self.X_IA_0B0B = tensor_stuff(p_mat_0B0B, self.N, self.m,
                                          self.eta_m, self.l, self.tau_l)

        if self.OV_do:
            # For OV, we can use two different values for
            # nu1=0 and nu2=-2

            OV_tab = OV()
            p_mat = OV_tab[:, [0, 1, 5, 6, 7, 8, 9]]

            self.X_OV = tensor_stuff(p_mat, self.N, self.m, self.eta_m, self.l,
                                     self.tau_l)

        if self.kPol_do:

            tab1, tab2, tab3 = kPol()
            p_mat = tab1[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_kP1 = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                      self.l, self.tau_l)

            p_mat = tab2[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_kP2 = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                      self.l, self.tau_l)

            p_mat = tab3[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_kP3 = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                      self.l, self.tau_l)

        if self.RSD_do:

            tabA, self.A_coeff = RSDA()
            p_mat = tabA[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_RSDA = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)

            tabB, self.B_coeff = RSDB()
            p_mat = tabB[:, [0, 1, 5, 6, 7, 8, 9]]
            self.X_RSDB = tensor_stuff(p_mat, self.N, self.m, self.eta_m,
                                       self.l, self.tau_l)
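
The zero-padding warning above follows from simple arithmetic on the log grid: padding by n_pad samples moves the grid edge to k[0]*exp(-n_pad*delta_L), so n_pad >= ln(2)/delta_L pushes the padded k_min below half of the input k_min. A quick check on an illustrative grid:

import numpy as np

k = np.logspace(-3, 1, 200)  # illustrative, evenly log-spaced grid
delta_L = (np.log(k[-1]) - np.log(k[0])) / (k.size - 1)
n_pad_check = int(np.log(2) / delta_L) + 1

k_min_padded = k[0] * np.exp(-n_pad_check * delta_L)
print(n_pad_check, k_min_padded, k[0] / 2)  # k_min_padded should fall below k[0]/2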
Example #5
	def __init__(self,k,nu,param_mat=None,low_extrap=None,high_extrap=None,n_pad=None,verbose=False):

		
		# check for log spacing
		dk=np.diff(np.log(k))
		dk_test=np.ones_like(dk)*dk[0]
		
		log_sample_test='ERROR! FASTPT will not work if your k vector is not sampled evenly in log space!'
		
		np.testing.assert_array_almost_equal(dk, dk_test, decimal=4, err_msg=log_sample_test, verbose=False)
		
		# size of input array must be an even number 
		if (k.size % 2 != 0):
			raise ValueError('Input array must contain an even number of elements.')
			
		self.extrap=False		
		if (low_extrap is not None or high_extrap is not None):
			self.EK=k_extend(k,low_extrap,high_extrap)
			k=self.EK.extrap_k()
			self.extrap=True
			
		self.low_extrap=low_extrap
		self.high_extrap=high_extrap
		
		
		self.k_old=k
		
		delta_L=(log(np.max(k))-log(np.min(k)))/(k.size-1)
		
		if n_pad is not None:
			self.id_pad=np.arange(k.size)+n_pad
			d_logk=delta_L
			k_pad=np.log(k[0])-np.arange(1,n_pad+1)*d_logk
			k_pad=np.exp(k_pad)
			k_left=k_pad[::-1]
			
			k_pad=np.log(k[-1])+np.arange(1,n_pad+1)*d_logk
			k_right=np.exp(k_pad)
			k=np.hstack((k_left,k,k_right))
			
			# check to make sure that n_pad is sufficient to keep the
			# FASTPT k_min less than 1/2 of the input k_min (a plus 1 is added to be safe)
			n_pad_check=int(np.log(2)/delta_L) +1
			if (n_pad < n_pad_check): 
				print('Warning: you should consider increasing your zero padding to at least', n_pad_check)
				print('so that you ensure that k_min_input > 2*k_min in the FASTPT universe.')
				print('k_min in the FASTPT universe is', k[0], 'while k_min_input is', self.k_old[0])
						
	
		if n_pad is None:
			print('Your results are only good for k > 2k_min')
		
			
		# default parameters for standard P_22_reg
		if param_mat is None:
			param_mat=np.array([[0,0,0,0],[0,0,2,0],[0,0,4,0],[2,-2,2,0],\
							[1,-1,1,0],[1,-1,3,0],[2,-2,0,1] ])
	
		self.k=k
		self.k_size=k.size
		self.nu=nu
		self.p_mat=param_mat
		self.p_size=param_mat.shape[0]
		self.verbose=verbose
		self.n_pad=n_pad
		
		alpha=self.p_mat[:,0]
		beta=self.p_mat[:,1]
		l_Bessel=self.p_mat[:,2]
		type=self.p_mat[:,3]
		

		self.N=k.size
		
		# define eta_m and eta_n=eta_m
		omega=2*pi/(float(self.N)*delta_L)
		self.m=np.arange(-self.N//2,self.N//2+1) 
		self.eta_m=omega*self.m
		
		
		# define l and tau_l
		self.n_l=self.m.size + self.m.size - 1
		self.l=np.arange(-self.n_l//2+1,self.n_l//2+1)
		self.tau_l=omega*self.l
		
		#Q_m=np.zeros((param_mat.shape[0],self.N+1), dtype=complex) 
		self.pf=np.zeros((param_mat.shape[0])) 
		self.two_part_l=np.zeros((param_mat.shape[0],self.l.size), dtype=complex) 
		#Q_n=Q_m
		self.g_m=np.zeros((param_mat.shape[0],self.N+1), dtype=complex) 
		self.g_n=np.zeros((param_mat.shape[0],self.N+1), dtype=complex) 
		self.h_l=np.zeros((param_mat.shape[0],self.l.size),dtype=complex)
		
		self.p=-5-2*self.nu-alpha-beta
	
		for i in range(param_mat.shape[0]):
	
			sigma=l_Bessel[i]+1/2.
		
			# Define Q_m and Q_n and p 
			# use eta_m for Q_n, the value is the same 
			Q_m=3/2.+ nu + alpha[i] + 1j*self.eta_m
			Q_n=3/2.+ nu + beta[i] + 1j*self.eta_m
			p=-5-2*nu-alpha[i]-beta[i]
	
			self.g_m[i,:]=g_m_vals(sigma,Q_m)
			
			if (type[i]==1):
			
				# this is the special case, Corresponding to the regularized version of 
				# J_{2,-2,0,reg}(k)
				# get values for g_n 
				# again use eta_m. 
				s=2+nu + beta[i] 
				Q_n=s+ 1j*self.eta_m
		
				self.g_n[i,:]=gamsn(Q_n)
		
				#two_part_m=2**Q_m
				self.g_m[i,:]=self.g_m[i,:]*2.**Q_m
	
				# prefactor 
				self.pf[i]=(-1)**l_Bessel[i]/pi**3*np.sqrt(pi/2.)
			
				self.two_part_l[i,:]=np.ones(self.l.size)
			
			else:
				self.g_n[i,:]=g_m_vals(sigma,Q_n)
				# pre factor 
				self.pf[i]=(-1)**l_Bessel[i]/pi**2*2.**(2+2*nu+alpha[i]+beta[i])
			
				self.two_part_l[i,:]=exp(1j*self.tau_l*log2)
			
			# calculate h_l     
			#arg=(p+1-1j*tau_l)
			self.h_l[i,:]=gamsn(self.p[i]+1-1j*self.tau_l)
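
Reading the unpacking in the constructor above, each row of param_mat is (alpha, beta, l_Bessel, type), with type=1 flagging the regularized J_{2,-2,0,reg} term. A labelled copy of the default matrix for the standard P_22_reg case:

import numpy as np

# columns: alpha, beta, l_Bessel, type (type=1 -> regularized J_{2,-2,0,reg} term)
param_mat_default = np.array([
    [0,  0, 0, 0],
    [0,  0, 2, 0],
    [0,  0, 4, 0],
    [2, -2, 2, 0],
    [1, -1, 1, 0],
    [1, -1, 3, 0],
    [2, -2, 0, 1],
])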
Example #6
    return sig_ff


if __name__ == "__main__":

    d = np.loadtxt('Pk_Planck15.dat')
    k = d[:, 0]
    P0 = d[:, 1]

    import copy
    test = copy.deepcopy(P0)

    low_extrap = -4
    high_extrap = 5
    EK = k_extend(k, low_extrap, high_extrap)
    k = EK.extrap_k()

    P0 = EK.extrap_P_low(P0)
    P0 = EK.extrap_P_high(P0)

    P1 = filter_highk(k, P0, 1, 5)
    P2 = filter_lowk(k, P0, .01, .05)

    k, P1 = EK.PK_orginal(P1)
    k, P2 = EK.PK_orginal(P2)
    k, P0 = EK.PK_orginal(P0)

    import matplotlib.pyplot as plt

    ax = plt.subplot(141)