Example #1
def power_spectrum_nd(input_array, box_dims=None):
	''' 
	Calculate the power spectrum of input_array and return it as an n-dimensional array,
	where n is the number of dimensions in input_array
	box_dims is the size of the box in comoving Mpc. If this is set to None (default),
	the internal box size is used
	
	Parameters:
		* input_array (numpy array): the array to calculate the 
			power spectrum of. Can be of any dimensions.
		* box_dims = None (float or array-like): the dimensions of the 
			box. If this is None, the current box volume is used along all
			dimensions. If it is a float, this is taken as the box length
			along all dimensions. If it is an array-like, the elements are
			taken as the box length along each axis.
	
	Returns:
		The power spectrum in the same dimensions as the input array.		
	'''

	box_dims = _get_dims(box_dims, input_array.shape)

	print_msg( 'Calculating power spectrum...')
	ft = fftpack.fftshift(fftpack.fftn(input_array.astype('float64')))
	power_spectrum = np.abs(ft)**2
	print_msg( '...done')

	# scale
	boxvol = np.product(map(float,box_dims))
	pixelsize = boxvol/(np.product(input_array.shape))
	power_spectrum *= pixelsize**2/boxvol
	
	return power_spectrum
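# Usage sketch (not part of the library listing above): a minimal call to
# power_spectrum_nd. The import path (c2raytools as c2t) and the 100 Mpc box
# length are assumptions for illustration only.
import numpy as np
import c2raytools as c2t

cube = np.random.normal(size=(64, 64, 64))       # stand-in for a real data cube
ps = c2t.power_spectrum_nd(cube, box_dims=100.)  # same box length along every axis
print(ps.shape)                                  # same shape as the input cube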
Example #2
def calc_dt_full(xfrac, dens, temp, z = -1, correct=True):
	'''
	Calculate the differential brightness temperature assuming only that Lyman alpha is fully coupled so T_s = T_k
    (NOT T_s >> T_CMB)
	
	Parameters:
		* xfrac (XfracFile object, string or numpy array): the ionization fraction
		* dens (DensityFile object, string or numpy array): density in cgs units
        	* temp (TemperFile object, string or numpy array): the temperature in K
		* z = -1 (float): The redshift (if < 0 this will be figured out from the files)
		* correct = True (bool): if true include a correction for partially ionized cells.

	Returns:
		The differential brightness temperature as a numpy array with
		the same dimensions as xfrac.
	'''

	xi, xi_type   = get_data_and_type(xfrac)
	Ts, Ts_type   = get_data_and_type(temp)
	rho, rho_type = get_data_and_type(dens)
	xi  = xi.astype('float64')
	Ts  = Ts.astype('float64')
	rho = rho.astype('float64')
	
	if z < 0:
		z = determine_redshift_from_filename(xfrac)
		if z < 0: z = determine_redshift_from_filename(dens)
		if z < 0: z = determine_redshift_from_filename(temp)
		if z < 0: raise Exception('No redshift specified. Could not determine from file.')
	
	print_msg('Making full dT box for z=%f' % z)
	
        print "Calculating corrected dbt"
	return _dt_full(rho, xi, Ts, z, correct)
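# Usage sketch for calc_dt_full (assumption: the function and the file classes
# are exposed through c2raytools as c2t; the file paths are placeholders).
import c2raytools as c2t

xfile = c2t.XfracFile('/path/to/data/xfrac3d_8.515.bin')
dfile = c2t.DensityFile('/path/to/data/8.515n_all.dat')
tfile = c2t.TemperFile('/path/to/data/temper_8.515.bin')  # name format assumed
dT_full = c2t.calc_dt_full(xfile, dfile, tfile)           # z is read from the file names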
Example #3
def calc_dt(xfrac, dens, z=-1):
    """
	Calculate the differential brightness temperature assuming T_s >> T_CMB
	
	Parameters:
		* xfrac (XfracFile object, string or numpy array): the ionization fraction
		* dens (DensityFile object, string or numpy array): density in cgs units
		* z = -1 (float): The redshift (if < 0 this will be figured out from the files)
		
	Returns:
		The differential brightness temperature as a numpy array with
		the same dimensions as xfrac.
	"""

    xi, xi_type = get_data_and_type(xfrac)
    rho, rho_type = get_data_and_type(dens)
    xi = xi.astype("float64")
    rho = rho.astype("float64")

    if z < 0:
        z = determine_redshift_from_filename(xfrac)
        if z < 0:
            z = determine_redshift_from_filename(dens)
            if z < 0:
                raise Exception("No redshift specified. Could not determine from file.")

    print_msg("Making dT box for z=%f" % z)

    # Calculate dT
    return _dt(rho, xi, z)
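# Usage sketch for calc_dt, modelled on the docstring example of get_distorted_dt
# further down in this listing. The file paths are placeholders.
import c2raytools as c2t

xfile = c2t.XfracFile('/path/to/data/xfrac3d_8.515.bin')
dfile = c2t.DensityFile('/path/to/data/8.515n_all.dat')
dT = c2t.calc_dt(xfile, dfile)   # z = -1, so the redshift is taken from the file names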
Example #4
def mu_binning(powerspectrum, los_axis = 0, mubins=20, kbins=10, box_dims=None, weights=None,
			exclude_zero_modes=True, binning='log'):
	'''
	This function is for internal use only.
	'''
	
	if weights is not None:
		powerspectrum *= weights

	assert(len(powerspectrum.shape)==3)

	k_comp, k = _get_k(powerspectrum, box_dims)

	mu = _get_mu(k_comp, k, los_axis)

	#Calculate k values, and make k bins
	kbins = _get_kbins(kbins, box_dims, k, binning=binning)
	dk = (kbins[1:]-kbins[:-1])/2.
	n_kbins = len(kbins)-1
		
	#Exclude k_perp = 0 modes
	if exclude_zero_modes:
		good_idx = _get_nonzero_idx(powerspectrum.shape, los_axis)
	else:
		good_idx = np.ones_like(powerspectrum)

	#Make mu bins
	if isinstance(mubins,int):
		mubins = np.linspace(-1., 1., mubins+1)
	dmu = (mubins[1:]-mubins[:-1])/2.
	n_mubins = len(mubins)-1

	#Remove the zero component from the power spectrum. mu is undefined here
	powerspectrum[tuple(np.array(powerspectrum.shape)/2)] = 0.

	#Bin the data
	print_msg('Binning data...')
	outdata = np.zeros((n_mubins,n_kbins))
	for ki in range(n_kbins):
		print_msg('Bin %d of %d' % (ki, n_kbins))
		kmin = kbins[ki]
		kmax = kbins[ki+1]
		kidx = get_eval()('(k >= kmin) & (k < kmax)')
		kidx *= good_idx
		for i in range(n_mubins):
			mu_min = mubins[i]
			mu_max = mubins[i+1]
			idx = get_eval()('(mu >= mu_min) & (mu < mu_max) & kidx')
			outdata[i,ki] = np.mean(powerspectrum[idx])

			if weights is not None:
				outdata[i,ki] /= weights[idx].mean()

	return outdata, mubins[:-1]+dmu, kbins[:-1]+dk
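# Sketch of how mu_binning is typically called: a 3D power spectrum (e.g. from
# power_spectrum_nd above) is binned into P(k, mu) with the line of sight along
# axis 0. Assumes both helpers are in scope (same module) and a 100 Mpc box.
import numpy as np

cube = np.random.normal(size=(64, 64, 64))
ps_3d = power_spectrum_nd(cube, box_dims=(100., 100., 100.))
ps_mu_k, mu_centers, k_centers = mu_binning(ps_3d, los_axis=0, mubins=20,
                                            kbins=10, box_dims=(100., 100., 100.))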
Example #5
def mu_binning(powerspectrum, los_axis = 0, mubins=20, kbins=10, box_dims = None, weights=None,
			exclude_zero_modes = True):
	#This function is for internal use only.
	
	if weights is not None:
		powerspectrum *= weights

	assert(len(powerspectrum.shape)==3)

	k_comp, k = _get_k(powerspectrum, box_dims)

	mu = _get_mu(k_comp, k, los_axis)

	#Calculate k values, and make k bins
	kbins = _get_kbins(kbins, box_dims, k)

	dk = (kbins[1:]-kbins[:-1])/2.
	n_kbins = len(kbins)-1

	#Exclude the k_x = 0, k_y = 0, k_z = 0 modes
	if exclude_zero_modes:
		x,y,z = np.indices(powerspectrum.shape)
		zero_ind = (x == k.shape[0]/2) + (y == k.shape[1]/2) + (z == k.shape[2]/2)
		powerspectrum[zero_ind] = 0.

	#Make mu bins
	if isinstance(mubins,int):
		mubins = np.linspace(-1., 1., mubins+1)
	dmu = (mubins[1:]-mubins[:-1])/2.
	n_mubins = len(mubins)-1

	#Remove the zero component from the power spectrum. mu is undefined here
	powerspectrum[tuple(np.array(powerspectrum.shape)/2)] = 0.

	#Bin the data
	print_msg('Binning data...')
	outdata = np.zeros((n_mubins,n_kbins))
	for ki in range(n_kbins):
		kmin = kbins[ki]
		kmax = kbins[ki+1]
		kidx = (k >= kmin) * (k < kmax)
		for i in range(n_mubins):
			mu_min = mubins[i]
			mu_max = mubins[i+1]
			idx = (mu >= mu_min) * (mu < mu_max) * kidx
			outdata[i,ki] = np.mean(powerspectrum[idx])

			if weights is not None:
				outdata[i,ki] /= weights[idx].mean()

	return outdata, mubins[:-1]+dmu, kbins[:-1]+dk
Example #6
def set_sim_constants(boxsize_cMpc):
	'''This method will set the values of relevant constants depending on the 
	simulation
	
	Parameters:
		* boxsize_cMpc (float): the box size in cMpc/h
		Valid values are 37, 64, 114 or 425
		
	Returns:
		Nothing.
	'''
	global boxsize, LB, nbox_fine, M_box, M_grid, lscale, tscale, velconvert

	boxsize = boxsize_cMpc
	LB = boxsize/const.h	
	if hf.flt_comp(boxsize, 425.):
		hf.print_msg('Setting conversion factors for 425/h Mpc box')
		nbox_fine = 10976
	elif hf.flt_comp(boxsize, 114.):
		hf.print_msg('Setting conversion factors for 114/h Mpc box')
		nbox_fine = 6144
	elif hf.flt_comp(boxsize, 64.):
		hf.print_msg('Setting conversion factors for 64/h Mpc box')
		nbox_fine = 3456
	elif hf.flt_comp(boxsize, 37.):
		hf.print_msg('Setting conversion factors for 37/h Mpc box')
		nbox_fine = 2048
	else:
		raise Exception('Invalid boxsize (%.3f cMpc)' % boxsize_cMpc)

	M_box      = const.rho_matter*(LB*const.Mpc)**3 # mass in box (g, not M0)
	M_grid     = M_box/(float(nbox_fine)**3)
	lscale = (LB)/float(nbox_fine)*const.Mpc # size of a cell in cm, comoving
	tscale = 2.0/(3.0*np.sqrt(const.Omega0)*const.H0/const.Mpc*1.e5) # time scale, when divided by (1+z)^2
	velconvert = lambda z: lscale/tscale*(1.0+z)/1.e5
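# Usage sketch for set_sim_constants (assumption: it is exposed at package level
# as c2t.set_sim_constants). It has to be called before reading simulation files
# so that the conversion factors above (LB, M_grid, velconvert, ...) are defined.
import c2raytools as c2t

c2t.set_sim_constants(114.)   # 114/h Mpc box -> nbox_fine = 6144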
Example #7
	def read_from_file(self, filename, old_format = False):
		'''
		Read data from file.
		
		Parameters:
			* filename (string): the file to read from.
			* old_format = False (bool): whether to use the old-style 
				file format.
		Returns:
			Nothing
		'''

		print_msg('Reading density file:%s ...' % filename)
		self.filename = filename
		#Read raw data from density file
		f = open(filename, 'rb')

		if old_format:
			self.mesh_x = 203
			self.mesh_y = 203
			self.mesh_z = 203
		else:
			temp_mesh = np.fromfile(f,count=3,dtype='int32')
			self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh

		self.raw_density = np.fromfile(f, dtype='float32')
		self.raw_density = self.raw_density.reshape((self.mesh_x, self.mesh_y, self.mesh_z), order='F')
		
		f.close()

		#Convert to g/cm^3 (comoving)
		conv_factor = const.rho_crit_0*(float(self.mesh_x)/float(conv.nbox_fine))**3*const.OmegaB
		self.cgs_density = self.raw_density*conv_factor
		print_msg('Mean density: %g' % np.mean(self.cgs_density.astype('float64')))
		print_msg('Critical matter density: %g' % (const.rho_crit_0*const.OmegaB))

		#Store the redshift from the filename
		try:
			import os.path
			name = os.path.split(filename)[1]
			if old_format:
				self.z = float(name[:5])
			else:
				self.z = float(name.split('n_')[0])
		except:
			print_msg('Could not determine redshift from file name')
			self.z = -1
		print_msg( '...done')
Example #8
def beam_convolve(input_array, z, fov_mpc, beam_w = None, max_baseline = None, \
				beamshape='gaussian'):
	''' 
	Convolve input_array with a beam of the specified form.
	The beam can be specified either by a width in arcminutes,
	or as a maximum baseline. You must specify exactly one of these
	parameters.
	
	Parameters:
		* input_array (numpy array): the array to be convolved
		* z (float): the redshift of the map
		* fov_mpc (float): the field of view in cMpc
		* beam_w (float) = None: the width of the beam in arcminutes
		* max_baseline (float): the maximum baseline in meters 
			(can be specified instead of beam_w)
		* beamshape (string): The shape of the beam 
			(only 'gaussian' supported at this time)
	
	Returns:
		The convolved array (a numpy array with the same dimensions
		as input_array).
	'''

	if (not beam_w) and (not max_baseline):
		raise Exception('Please specify either a beam width or a maximum baseline')
	elif not beam_w: #Calculate beam width from max baseline
		beam_w = get_beam_w(max_baseline, z)

	angle = angular_size(fov_mpc*1000./(1.0 + z), z)/60.
	mx = input_array.shape[0]

	print_msg('Field of view is %.2f arcminutes' % (angle) )
	print_msg('Convolving with %s beam of size %.2f arcminutes...' % \
				(beamshape, beam_w) )

	#Convolve with beam
	if beamshape == 'gaussian':
		sigma0 = (beam_w)/angle/(2.0 * np.sqrt(2.0*np.log(2.)))*mx
		kernel = gauss_kernel(sigma=sigma0, size=mx)
	else:
		raise Exception('Unknown beamshape: %s' % beamshape)

	#out =  signal.fftconvolve(input_array, kernel)
	out =  fftconvolve(input_array, kernel)

	#fftconvolve makes the output twice the size, so return only the central part	
	ox = out.shape[0]
	return out[ox//4:3*ox//4, ox//4:3*ox//4]
Example #9
def observational_lightcone_to_physical(observational_lightcone, input_freqs, input_dtheta):
    '''
    Interpolate a lightcone volume measured in observational (angle/frequency)
    units into physical (length) units. The output resolution will be set
    to the coarsest one, as determined either by the angular or the frequency
    resolution. The lightcone must have the LoS as the last index, with 
    frequencies decreasing along the LoS.
    
    Parameters:
        * observational_lightcone (numpy array): the input lightcone volume
        * input_freqs (numpy array): the frequency in MHz of each slice along the 
            line of sight of the input
        * input_dtheta (float): the angular size of a cell in arcmin
        
    Returns:
        * The output volume
        * The redshifts along the LoS of the output
        * The output cell size in Mpc
    '''
    assert input_freqs[0] > input_freqs[-1]
    assert observational_lightcone.shape[0] == observational_lightcone.shape[1]
    
    #Determine new cell size - set either by frequency or angle.
    #The FoV size in Mpc is set by the lowest redshift
    dnu = input_freqs[0]-input_freqs[1]
    z_low = cm.nu_to_z(input_freqs[0])
    fov_deg = observational_lightcone.shape[0]*input_dtheta/60.
    fov_mpc = fov_deg/cm.angular_size_comoving(1., z_low)
    cell_size_perp = fov_mpc/observational_lightcone.shape[0]
    cell_size_par = cm.nu_to_cdist(input_freqs[-1])-cm.nu_to_cdist(input_freqs[-2])
    output_cell_size = max([cell_size_par, cell_size_perp])
    hf.print_msg('Making physical lightcone with cell size %.2f Mpc' % output_cell_size)
    #Go through each slice along frequency axis. Cut off excess and 
    #interpolate down to correct resolution
    n_cells_perp = int(fov_mpc/output_cell_size)
    output_volume_par = np.zeros((n_cells_perp, n_cells_perp, observational_lightcone.shape[2]))
    for i in range(output_volume_par.shape[2]):
        z = cm.nu_to_z(input_freqs[i])
        output_volume_par[:,:,i] = angular_slice_to_physical(observational_lightcone[:,:,i],\
                                                    z, slice_size_deg=fov_deg, output_cell_size=output_cell_size,\
                                                    output_size_mpc=fov_mpc, order=2)
    #Bin along frequency axis
    output_volume, output_redshifts = bin_lightcone_in_mpc(output_volume_par, \
                                                input_freqs, output_cell_size)
    
    return output_volume, output_redshifts, output_cell_size
Example #10
def power_spectrum_nd(input_array, box_dims=None):
    """ Calculate the power spectrum of input_array and return it as an n-dimensional array,
	where n is the number of dimensions in input_array
	box_dims is the size of the box in comoving Mpc. If this is set to None (default),
	the internal box size is used"""

    box_dims = get_dims(box_dims, input_array.shape)

    utils.print_msg("Calculating power spectrum...")
    ft = fftpack.fftshift(fftpack.fftn(input_array.astype("float64")))
    power_spectrum = np.abs(ft) ** 2
    utils.print_msg("...done")

    # scale
    boxvol = np.product(map(float, box_dims))
    pixelsize = boxvol / (np.product(input_array.shape))
    power_spectrum *= pixelsize ** 2 / boxvol

    return power_spectrum
Example #11
def radial_average(input_array, box_dims, kbins=10):
    """
	Radially average the data in input_array
	box_dims is the length of the box in Mpc along each axis
	kbins can be an integer specifying the number of bins,
	or a list of bins edges. if an integer is given, the bins
	are logarithmically spaced

	"""

    dim = len(input_array.shape)
    if dim == 2:
        x, y = np.indices(input_array.shape)
        center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])
        kx = 2.0 * np.pi * (x - center[0]) / box_dims[0]
        ky = 2.0 * np.pi * (y - center[1]) / box_dims[1]
        k = np.sqrt(kx ** 2 + ky ** 2)
    elif dim == 3:
        x, y, z = np.indices(input_array.shape)
        center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0, (z.max() - z.min()) / 2.0])
        kx = 2.0 * np.pi * (x - center[0]) / box_dims[0]
        ky = 2.0 * np.pi * (y - center[1]) / box_dims[1]
        kz = 2.0 * np.pi * (z - center[2]) / box_dims[2]
        k = np.sqrt(kx ** 2 + ky ** 2 + kz ** 2)
    else:
        raise Exception("Check your dimensions!")

    if isinstance(kbins, int):
        kmin = 2.0 * np.pi / min(box_dims)
        kbins = 10 ** np.linspace(np.log10(kmin), np.log10(k.max()), kbins + 1)

        # Bin the data
    utils.print_msg("Binning data...")
    nbins = len(kbins) - 1
    dk = (kbins[1:] - kbins[:-1]) / 2.0
    outdata = np.zeros(nbins)
    for ki in range(nbins):
        kmin = kbins[ki]
        kmax = kbins[ki + 1]
        idx = (k >= kmin) * (k < kmax)
        outdata[ki] = np.mean(input_array[idx])

    return outdata, kbins[:-1] + dk
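# Sketch: combining power_spectrum_nd with radial_average gives a spherically
# averaged P(k). Assumes both helpers are in scope (same module as the listings
# above) and that a 100 Mpc box length along each axis is appropriate.
import numpy as np

box_dims = (100., 100., 100.)
cube = np.random.normal(size=(64, 64, 64))
ps_nd = power_spectrum_nd(cube, box_dims=box_dims)
ps_k, k_bins = radial_average(ps_nd, box_dims, kbins=15)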
Example #12
def cross_power_spectrum_nd(input_array1, input_array2, box_dims):
	''' 
	Calculate the cross power spectrum of two arrays and return it as an n-dimensional array,
	where n is the number of dimensions in input_array
	box_dims is the size of the box in comoving Mpc. If this is set to None (default),
	the internal box size is used
	
	Parameters:
		* input_array1 (numpy array): the first array to calculate the 
			power spectrum of. Can be of any dimensions.
		* input_array2 (numpy array): the second array. Must have same 
			dimensions as input_array1.
		* box_dims = None (float or array-like): the dimensions of the 
			box. If this is None, the current box volume is used along all
			dimensions. If it is a float, this is taken as the box length
			along all dimensions. If it is an array-like, the elements are
			taken as the box length along each axis.
	
	Returns:
		The cross power spectrum in the same dimensions as the input arrays.
		
	TODO:
		Also return k values.
	'''

	assert(input_array1.shape == input_array2.shape)

	box_dims = _get_dims(box_dims, input_array1.shape)

	print_msg( 'Calculating power spectrum...')
	ft1 = fftpack.fftshift(fftpack.fftn(input_array1.astype('float64')))
	ft2 = fftpack.fftshift(fftpack.fftn(input_array2.astype('float64')))
	power_spectrum = np.real(ft1)*np.real(ft2)+np.imag(ft1)*np.imag(ft2)
	print_msg( '...done')

	# scale
	#boxvol = float(box_side)**len(input_array1.shape)
	boxvol = np.product(map(float,box_dims))
	pixelsize = boxvol/(np.product(map(float,input_array1.shape)))
	power_spectrum *= pixelsize**2/boxvol

	return power_spectrum
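# Usage sketch for cross_power_spectrum_nd: the two cubes must have the same
# shape. The correlated toy fields and the box dimensions are assumptions.
import numpy as np

field1 = np.random.normal(size=(64, 64, 64))
field2 = field1 + 0.5*np.random.normal(size=(64, 64, 64))   # partially correlated
cross_ps = cross_power_spectrum_nd(field1, field2, box_dims=(100., 100., 100.))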
Example #13
def bin_lightcone_in_frequency(lightcone, z_low, box_size_mpc, dnu):
    '''
    Bin a lightcone in frequency bins.
    
    Parameters:
        * lightcone (numpy array): the lightcone in length units
        * z_low (float): the lowest redshift of the lightcone
        * box_size_mpc (float): the side of the lightcone in Mpc
        * dnu (float): the width of the frequency bins in MHz
        
    Returns:
        The lightcone, binned in frequencies with high frequencies first
        The frequencies along the line of sight in MHz
    '''
    #Figure out dimensions and make output volume
    cell_size = box_size_mpc/lightcone.shape[0]
    distances = cm.z_to_cdist(z_low) + np.arange(lightcone.shape[2])*cell_size
    input_redshifts = cm.cdist_to_z(distances)
    input_frequencies = cm.z_to_nu(input_redshifts)
    nu1 = input_frequencies[0]
    nu2 = input_frequencies[-1]
    output_frequencies = np.arange(nu1, nu2, -dnu)
    output_lightcone = np.zeros((lightcone.shape[0], lightcone.shape[1], \
                                 len(output_frequencies)))
    
    #Bin in frequencies by smoothing and indexing
    max_cell_size = cm.nu_to_cdist(output_frequencies[-1])-cm.nu_to_cdist(output_frequencies[-2])
    smooth_scale = int(np.round(max_cell_size/cell_size))
    if smooth_scale < 1:
        smooth_scale = 1

    hf.print_msg('Smooth along LoS with scale %f' % smooth_scale)
    tophat3d = np.ones((1,1,smooth_scale))
    tophat3d /= np.sum(tophat3d)
    lightcone_smoothed = fftconvolve(lightcone, tophat3d)
    
    for i in range(output_lightcone.shape[2]):
        nu = output_frequencies[i]
        idx = hf.find_idx(input_frequencies, nu)
        output_lightcone[:,:,i] = lightcone_smoothed[:,:,idx]

    return output_lightcone, output_frequencies
Example #14
def cross_power_spectrum_nd(input_array1, input_array2, box_dims):
    """ Calculate the cross power spectrum of input_array1 and input_array2 and return it as an n-dimensional array,
	where n is the number of dimensions in input_array"""

    assert input_array1.shape == input_array2.shape

    box_dims = get_dims(box_dims, input_array1.shape)

    utils.print_msg("Calculating power spectrum...")
    ft1 = fftpack.fftshift(fftpack.fftn(input_array1.astype("float64")))
    ft2 = fftpack.fftshift(fftpack.fftn(input_array2.astype("float64")))
    power_spectrum = np.real(ft1) * np.real(ft2) + np.imag(ft1) * np.imag(ft2)
    utils.print_msg("...done")

    # scale
    # boxvol = float(box_side)**len(input_array1.shape)
    boxvol = np.product(map(float, box_dims))
    pixelsize = boxvol / (np.product(map(float, input_array1.shape)))
    power_spectrum *= pixelsize ** 2 / boxvol

    return power_spectrum
Example #15
	def read_from_file(self, filename, old_format=False):
		'''
		Read data from file.
		
		Parameters:
			* filename (string): the file to read from.
			* old_format = False (bool): whether to use the old-style (32 bits)
				file format.
		Returns:
			Nothing
		'''
		print_msg('Reading xfrac file:%s...' % filename)
		self.filename = filename

		f = open(filename, 'rb')
		temp_mesh = np.fromfile(f, count=6, dtype='int32')
		self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh[1:4]

		if old_format:
			self.xi = np.fromfile(f, dtype='float32')
		else:
			self.xi = np.fromfile(f, dtype='float64')
		self.xi = self.xi.reshape((self.mesh_x, self.mesh_y, self.mesh_z), order='F')

		f.close()
		print_msg('...done')

		#Store the redshift from the filename
		import os.path
		try:
			name = os.path.split(filename)[1]
			self.z = float(name.split('_')[1][:-4])
		except:
			print_msg('Could not determine redshift from file name')
			self.z = -1
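# Usage sketch for the xfrac reader above. XfracFile() without arguments is also
# used in the freq_box listing further down; the file path is a placeholder and
# the package-level import is an assumption.
import c2raytools as c2t

xfile = c2t.XfracFile()
xfile.read_from_file('/path/to/data/xfrac3d_8.515.bin')
print(xfile.xi.shape)   # (mesh_x, mesh_y, mesh_z); xfile.z holds the redshift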
Example #16
	def read_from_file(self, filename):
		'''
		Read data from file. Sets the instance variables
		self.raw_velocity and self.kmsrho8
		
		Parameters:
			* filename (string): the file to read from.
		Returns:
			Nothing
		'''
		print_msg('Reading velocity file: %s...' % filename)
		self.filename = filename

		#Read raw data from velocity file
		f = open(filename, 'rb')
		temp_mesh = np.fromfile(f, count=3, dtype='int32')
		self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh
		self.raw_velocity = np.fromfile(f, dtype='float32').astype('float64')
		f.close()
		self.raw_velocity = self.raw_velocity.reshape((3, self.mesh_x, self.mesh_y, self.mesh_z), order='F')

		#Store the redshift from the filename
		try:
			import os.path
			name = os.path.split(filename)[1]
			self.z = float(name.split('v_')[0])
		except:
			print_msg('Could not determine redshift from file name')
			self.z = -1

		#Convert to km/s*(rho/8)
		self.kmsrho8 = self.raw_velocity*conv.velconvert(z = self.z)


		print_msg('...done')
Example #17
def radial_average(input_array, box_dims, kbins=10):
    '''
	Radially average data. Mostly for internal use.
	
	Parameters: 
		* input_array (numpy array): the data array
		* box_dims = None (float or array-like): the dimensions of the 
			box. If this is None, the current box volume is used along all
			dimensions. If it is a float, this is taken as the box length
			along all dimensions. If it is an array-like, the elements are
			taken as the box length along each axis.
		* kbins = 10 (integer or array-like): The number of bins,
			or a list containing the bin edges. If an integer is given, the bins
			are logarithmically spaced.
			
	Returns:
		A tuple with (data, bins, n_modes), where data is an array with the 
		averaged data, bins is an array with the bin centers and n_modes is the 
		number of modes in each bin

	'''

    k_comp, k = _get_k(input_array, box_dims)

    kbins = _get_kbins(kbins, box_dims, k)

    #Bin the data
    print_msg('Binning data...')
    dk = (kbins[1:] - kbins[:-1]) / 2.
    #Total power in each bin
    outdata = np.histogram(k.flatten(),
                           bins=kbins,
                           weights=input_array.flatten())[0]
    #Number of modes in each bin
    n_modes = np.histogram(k.flatten(), bins=kbins)[0].astype('float')
    outdata /= n_modes

    return outdata, kbins[:-1] + dk, n_modes
Example #18
def radial_average(input_array, box_dims, kbins=10, binning='log', breakpoint=0.1):
	'''
	Radially average data. Mostly for internal use.
	
	Parameters: 
		* input_array (numpy array): the data array
		* box_dims = None (float or array-like): the dimensions of the 
			box. If this is None, the current box volume is used along all
			dimensions. If it is a float, this is taken as the box length
			along all dimensions. If it is an array-like, the elements are
			taken as the box length along each axis.
		* kbins = 10 (integer or array-like): The number of bins,
			or a list containing the bin edges. If an integer is given, the bins
			are logarithmically spaced.
			
	Returns:
		A tuple with (data, bins, n_modes), where data is an array with the 
		averaged data, bins is an array with the bin centers and n_modes is the 
		number of modes in each bin

	'''

	k_comp, k = _get_k(input_array, box_dims)

	kbins = _get_kbins(kbins, box_dims, k, binning=binning, breakpoint=breakpoint)
	
	#Bin the data
	print_msg('Binning data...')
	dk = (kbins[1:]-kbins[:-1])/2.
	#Total power in each bin
	outdata = np.histogram(k.flatten(), bins=kbins,
						weights = input_array.flatten())[0]
	#Number of modes in each bin
	n_modes = np.histogram(k.flatten(), bins=kbins)[0].astype('float')
	outdata /= n_modes
	
	return outdata, kbins[:-1]+dk, n_modes
Example #19
def calc_dt_full(xfrac, temp, dens, z=-1):
    """
	Calculate the differential brightness temperature assuming only that Lyman alpha is fully coupled so T_s = T_k
    (NOT T_s >> T_CMB)
	
	Parameters:
		* xfrac (XfracFile object, string or numpy array): the ionization fraction
        * temp (TemperFile object, string or numpy array): the temperature in K
		* dens (DensityFile object, string or numpy array): density in cgs units
		* z = -1 (float): The redshift (if < 0 this will be figured out from the files)
		
	Returns:
		The differential brightness temperature as a numpy array with
		the same dimensions as xfrac.
	"""

    xi, xi_type = get_data_and_type(xfrac)
    Ts, Ts_type = get_data_and_type(temp)
    rho, rho_type = get_data_and_type(dens)
    xi = xi.astype("float64")
    Ts = Ts.astype("float64")
    rho = rho.astype("float64")

    if z < 0:
        z = determine_redshift_from_filename(xfrac)
        if z < 0:
            z = determine_redshift_from_filename(dens)
            if z < 0:
                z = determine_redshift_from_filename(temp)
                if z < 0:
                    raise Exception("No redshift specified. Could not determine from file.")

    print_msg("Making full dT box for z=%f" % z)

    # Calculate dT
    print "calculating corrected dbt"
    return _dt_full_corrected(dens, xfrac, temp, z)  # rho, Ts, xi, z)
Example #20
def power_spectrum_nd(input_array, box_dims = None):
	''' 
	Calculate the power spectrum of input_array and return it as an n-dimensional array,
	where n is the number of dimensions in input_array
	box_dims is the size of the box in comoving Mpc. If this is set to None (default),
	the internal box size is used
	
	Parameters:
		* input_array (numpy array): the array to calculate the 
			power spectrum of. Can be of any dimensions.
		* box_dims = None (float or array-like): the dimensions of the 
			box. If this is None, the current box volume is used along all
			dimensions. If it is a float, this is taken as the box length
			along all dimensions. If it is an array-like, the elements are
			taken as the box length along each axis.
	
	Returns:
		The power spectrum in the same dimensions as the input array.
		
	TODO:
		Also return k values.
	'''

	box_dims = _get_dims(box_dims, input_array.shape)

	print_msg( 'Calculating power spectrum...')
	ft = fftpack.fftshift(fftpack.fftn(input_array.astype('float64')))
	power_spectrum = np.abs(ft)**2
	print_msg( '...done')

	# scale
	boxvol = np.product(map(float,box_dims))
	pixelsize = boxvol/(np.product(input_array.shape))
	power_spectrum *= pixelsize**2/boxvol

	return power_spectrum
Example #21
def physical_lightcone_to_observational(physical_lightcone,
                                        input_z_low,
                                        output_dnu,
                                        output_dtheta,
                                        input_box_size_mpc=None):
    '''
    Interpolate a lightcone volume from physical (length) units
    to observational (angle/frequency) units.
    
    Parameters:
        * physical_lightcone (numpy array): the lightcone volume
        * input_z_low (float): the lowest redshift of the input lightcone
        * output_dnu (float): the frequency resolution of the output volume in MHz
        * output_dtheta (float): the angular resolution of the output in arcmin
        * input_box_size_mpc (float): the size of the input FoV in Mpc.
            If None (default), this will be set to conv.LB
            
    Returns:
        * The output volume as a numpy array
        * The output frequencies in MHz as an array of floats
    '''
    if input_box_size_mpc == None:
        input_box_size_mpc = conv.LB

    #For each output redshift: average the corresponding slices
    hf.print_msg('Making observational lightcone...')
    hf.print_msg('Binning in frequency...')
    lightcone_freq, output_freqs = bin_lightcone_in_frequency(physical_lightcone,\
                                                         input_z_low, input_box_size_mpc, output_dnu)
    #Calculate the FoV in degrees at lowest z (largest one)
    fov_deg = cm.angular_size_comoving(input_box_size_mpc, input_z_low)
    #Calculate dimensions of output volume
    n_cells_theta = int(fov_deg * 60. / output_dtheta)
    n_cells_nu = len(output_freqs)
    #Go through each slice and make angular slices for each one
    hf.print_msg('Binning in angle...')
    output_volume = np.zeros((n_cells_theta, n_cells_theta, n_cells_nu))
    for i in range(n_cells_nu):
        if i % 10 == 0:
            hf.print_msg('Slice %d of %d' % (i, n_cells_nu))
        z = cm.nu_to_z(output_freqs[i])
        output_volume[:,:,i] = physical_slice_to_angular(lightcone_freq[:,:,i], z, \
                                        slice_size_mpc=input_box_size_mpc, fov_deg=fov_deg,\
                                        dtheta=output_dtheta, order=2)

    return output_volume, output_freqs
Example #22
def calc_dt_full(xfrac, dens, temp, z=-1, correct=True):
    '''
	Calculate the differential brightness temperature assuming only that Lyman alpha is fully coupled so T_s = T_k
    (NOT T_s >> T_CMB)
	
	Parameters:
		* xfrac (XfracFile object, string or numpy array): the ionization fraction
		* dens (DensityFile object, string or numpy array): density in cgs units
        	* temp (TemperFile object, string or numpy array): the temperature in K
		* z = -1 (float): The redshift (if < 0 this will be figured out from the files)
		* correct = True (bool): if true include a correction for partially ionized cells.

	Returns:
		The differential brightness temperature as a numpy array with
		the same dimensions as xfrac.
	'''

    xi, xi_type = get_data_and_type(xfrac)
    Ts, Ts_type = get_data_and_type(temp)
    rho, rho_type = get_data_and_type(dens)
    xi = xi.astype('float64')
    Ts = Ts.astype('float64')
    rho = rho.astype('float64')

    if z < 0:
        z = determine_redshift_from_filename(xfrac)
        if z < 0: z = determine_redshift_from_filename(dens)
        if z < 0: z = determine_redshift_from_filename(temp)
        if z < 0:
            raise Exception(
                'No redshift specified. Could not determine from file.')

    print_msg('Making full dT box for z=%f' % z)

    print "Calculating corrected dbt"
    return _dt_full(rho, xi, Ts, z, correct)
Example #23
def physical_lightcone_to_observational(physical_lightcone, input_z_low, output_dnu, output_dtheta, input_box_size_mpc=None):
    '''
    Interpolate a lightcone volume from physical (length) units
    to observational (angle/frequency) units.
    
    Parameters:
        * physical_lightcone (numpy array): the lightcone volume
        * input_z_low (float): the lowest redshift of the input lightcone
        * output_dnu (float): the frequency resolution of the output volume in MHz
        * output_dtheta (float): the angular resolution of the output in arcmin
        * input_box_size_mpc (float): the size of the input FoV in Mpc.
            If None (default), this will be set to conv.LB
            
    Returns:
        * The output volume as a numpy array
        * The output frequencies in MHz as an array of floats
    '''
    if input_box_size_mpc == None:
        input_box_size_mpc = conv.LB
    
    #For each output redshift: average the corresponding slices
    hf.print_msg('Making observational lightcone...')
    hf.print_msg('Binning in frequency...')
    lightcone_freq, output_freqs = bin_lightcone_in_frequency(physical_lightcone,\
                                                         input_z_low, input_box_size_mpc, output_dnu)
    #Calculate the FoV in degrees at lowest z (largest one)
    fov_deg = cm.angular_size_comoving(input_box_size_mpc, input_z_low)
    #Calculate dimensions of output volume
    n_cells_theta = int(fov_deg*60./output_dtheta)
    n_cells_nu = len(output_freqs)
    #Go through each slice and make angular slices for each one
    hf.print_msg('Binning in angle...')
    output_volume = np.zeros((n_cells_theta, n_cells_theta, n_cells_nu))
    for i in range(n_cells_nu):
        if i%10 == 0:
            hf.print_msg('Slice %d of %d' % (i, n_cells_nu))
        z = cm.nu_to_z(output_freqs[i])
        output_volume[:,:,i] = physical_slice_to_angular(lightcone_freq[:,:,i], z, \
                                        slice_size_mpc=input_box_size_mpc, fov_deg=fov_deg,\
                                        dtheta=output_dtheta, order=2)
        
    return output_volume, output_freqs
Example #24
    def read_from_file(self,
                       filename,
                       old_format=False,
                       neutral=False,
                       binary_format=False):
        '''
		Read data from file.
		
		Parameters:
			* filename (string): the file to read from.
			* old_format = False (bool): whether to use the old-style (32 bits)
				file format.
                        * neutral = False (bool): whether the content is the neutral or ionized fraction
                        * binary_format = False (bool): whether the file is in Fortran unformatted or binary (no record separators) format 
		Returns:
			Nothing
		'''
        print_msg('Reading xfrac file:%s...' % filename)
        self.filename = filename

        f = open(filename, 'rb')
        if binary_format:
            temp_mesh = np.fromfile(f, count=3, dtype='int32')
            self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh[0:3]
        else:
            temp_mesh = np.fromfile(f, count=6, dtype='int32')
            self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh[1:4]

        if old_format:
            self.xi = np.fromfile(f, dtype='float32')
        else:
            self.xi = np.fromfile(f, dtype='float64')
        self.xi = self.xi.reshape((self.mesh_x, self.mesh_y, self.mesh_z),
                                  order='F')

        if neutral:
            self.xi = 1.0 - self.xi

        f.close()
        print_msg('...done')

        #Store the redshift from the filename
        import os.path
        try:
            name = os.path.split(filename)[1]
            self.z = float(name.split('_')[1][:-4])
        except:
            print_msg('Could not determine redshift from file name')
            self.z = -1
Example #25
def set_sim_constants(boxsize_cMpc):
    '''This method will set the values of relevant constants depending on the 
	simulation
	
	Parameters:
		* boxsize_cMpc (float): the box size in cMpc/h
		Valid values are 37, 64, 114 or 425
		
	Returns:
		Nothing.
	'''
    global boxsize, LB, nbox_fine, M_box, M_grid, lscale, tscale, velconvert

    boxsize = boxsize_cMpc
    LB = boxsize / const.h
    if hf.flt_comp(boxsize, 425.):
        hf.print_msg('Setting conversion factors for 425/h Mpc box')
        nbox_fine = 10976
    elif hf.flt_comp(boxsize, 114.):
        hf.print_msg('Setting conversion factors for 114/h Mpc box')
        nbox_fine = 6144
    elif hf.flt_comp(boxsize, 64.):
        hf.print_msg('Setting conversion factors for 64/h Mpc box')
        nbox_fine = 3456
    elif hf.flt_comp(boxsize, 37.):
        hf.print_msg('Setting conversion factors for 37/h Mpc box')
        nbox_fine = 2048
    else:
        raise Exception('Invalid boxsize (%.3f cMpc)' % boxsize_cMpc)

    M_box = const.rho_matter * (LB * const.Mpc)**3  # mass in box (g, not M0)
    M_grid = M_box / (float(nbox_fine)**3)
    lscale = (
        LB) / float(nbox_fine) * const.Mpc  # size of a cell in cm, comoving
    tscale = 2.0 / (3.0 * np.sqrt(const.Omega0) * const.H0 / const.Mpc * 1.e5
                    )  # time scale, when divided by (1+z)^2
    velconvert = lambda z: lscale / tscale * (1.0 + z) / 1.e5
Example #26
	def read_from_file(self, filename, old_format=False, neutral=False,
                           binary_format=False):
		'''
		Read data from file.
		
		Parameters:
			* filename (string): the file to read from.
			* old_format = False (bool): whether to use the old-style (32 bits)
				file format.
                        * neutral = False (bool): whether the content is the neutral or ionized fraction
                        * binary_format = False (bool): whether the file is in Fortran unformatted or binary (no record separators) format 
		Returns:
			Nothing
		'''
		print_msg('Reading xfrac file:%s...' % filename)
		self.filename = filename

		f = open(filename, 'rb')
		if binary_format:
			temp_mesh = np.fromfile(f, count=3, dtype='int32')
			self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh[0:3]
		else:
			temp_mesh = np.fromfile(f, count=6, dtype='int32')
			self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh[1:4]

		if old_format:
			self.xi = np.fromfile(f, dtype='float32')
		else:
			self.xi = np.fromfile(f, dtype='float64')
		self.xi = self.xi.reshape((self.mesh_x, self.mesh_y, self.mesh_z), order='F')

		if neutral:
			self.xi = 1.0-self.xi

		f.close()
		print_msg('...done')

		#Store the redshift from the filename
		import os.path
		try:
			name = os.path.split(filename)[1]
			self.z = float(name.split('_')[1][:-4])
		except:
			print_msg('Could not determine redshift from file name')
			self.z = -1
Example #27
    def read_from_file(self, filename, old_format=False):
        '''
        Read data from file.
            
        Parameters:
            * filename (string): the file to read from.
            * old_format = False (bool): whether to use the old-style (32 bits)
                file format.
        Returns:
            Nothing
        '''
        print_msg('Reading IonRates3 file:%s...' % filename)
        self.filename = filename

        f = open(filename, 'rb')
        temp_mesh = np.fromfile(f, count=6, dtype='int32')
        self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh[1:4]

        if old_format:
            self.irate = np.fromfile(f, dtype='float32')
        else:
            self.irate = np.fromfile(f,
                                     count=self.mesh_x * self.mesh_y *
                                     self.mesh_z,
                                     dtype='float32')
            self.irate = self.irate.reshape(
                (self.mesh_x, self.mesh_y, self.mesh_z), order='F')

        f.close()
        print_msg('...done')

        #Store the redshift from the filename
        import os.path
        try:
            name = os.path.split(filename)[1]
            self.z = float(name.split('_')[1][:-4])
        except:
            print_msg('Could not determine redshift from file name')
            self.z = -1
Example #28
def get_distorted_dt(dT,
                     kms,
                     redsh,
                     los_axis=0,
                     velocity_axis=0,
                     num_particles=10,
                     periodic=True):
    ''' 
    Apply peculiar velocity distortions to a differential
    temperature box, using the Mesh-Particle-Mesh method,
    as described in http://arxiv.org/abs/1303.5627
    
    Parameters:
        * dT (numpy array): the differential temperature box
        * kms (numpy array): velocity in km/s, array of dimensions 
            (3,mx,my,mz) where (mx,my,mz) is dimensions of dT
        * redsh (float): the redshift
        * los_axis = 0 (int): the line-of-sight axis of the output volume
            (must be 0, 1 or 2)
        * velocity_axis = 0 (int): the index that indicates los velocity
        * num_particles = 10 (int): the number of particles to use per cell
            A higher number gives better accuracy, but worse performance.
        * periodic = True (bool): whether or not to apply periodic boundary
            conditions along the line-of-sight. If you are making a lightcone
            volume, this should be False.
        
    Returns:
        The redshift space box as a numpy array with same dimensions as dT.
        
    Example:
        Read a density file, a velocity file and an xfrac file, calculate the 
        brightness temperature, and convert it to redshift space.
        
        >>> vfile = c2t.VelocityFile('/path/to/data/8.515v_all.dat')
        >>> dfile = c2t.DensityFile('/path/to/data/8.515n_all.dat')
        >>> xfile = c2t.XfracFile('/path/to/data/xfrac3d_8.515.bin')
        >>> dT = c2t.calc_dt(xfile, dfile)
        >>> kms = vfile.get_kms_from_density(dfile)
        >>> dT_zspace = get_distorted_dt(dT, kms, dfile.z, los_axis = 0)
        
    .. note::
        At the moment, it is a requirement that dimensions perpendicular to
        the line-of-sight are equal. For example, if the box dimensions are
        (mx, my, mz) and the line-of-sight is along the z axis, then mx
        has to be equal to my.
        
    .. note::
        If dT is a lightcone volume, los_axis is not necessarily the
        same as velocity_axis. The lightcone volume methods in c2raytools
        all give output volumes that have the line-of-sight as the last index,
        regardless of the line-of-sight axis. For these volumes, you should
        always use los_axis=2 and set velocity_axis equal to whatever was
        used when producing the real-space lightcones.
    
    '''
    #Volume dimensions
    mx, my, mz = dT.shape
    assert (mx == my or my == mz
            or mx == mz)  #TODO: this should not be a requirement
    grid_depth = dT.shape[los_axis]
    grid_width = dT.shape[(los_axis + 1) % 3]
    box_depth = grid_depth * (conv.LB / float(grid_width))

    #Take care of different LOS axes
    assert (los_axis == 0 or los_axis == 1 or los_axis == 2)
    if los_axis == 0:
        get_skewer = lambda data, i, j: data[:, i, j]
    elif los_axis == 1:
        get_skewer = lambda data, i, j: data[i, :, j]
    else:
        get_skewer = lambda data, i, j: data[i, j, :]

    #Input redshift can be a float or an array, but we need an array
    redsh = np.atleast_1d(redsh)

    print_msg('Making velocity-distorted box...')
    print_msg('The (min) redshift is %.3f' % redsh[0])
    print_msg('The box size is %.3f cMpc' % conv.LB)

    #Figure out the apparent position shift
    vpar = kms[velocity_axis, :, :, :]
    z_obs = (1 + redsh) / (1. - vpar / const.c) - 1.
    dr = (1. + z_obs) * vpar / const.Hz(z_obs)

    #Make the distorted box
    distbox = np.zeros_like(dT)
    particle_dT = np.zeros(grid_depth * num_particles)

    last_percent = 0
    for i in range(grid_width):
        percent_done = int(float(i) / float(grid_width) * 100)
        if percent_done % 10 == 0 and percent_done != last_percent:
            print_msg('%d %%' % percent_done)
            last_percent = percent_done
        for j in range(grid_width):

            #Take a 1D skewer from the dT box
            dT_skewer = get_skewer(dT, i, j)

            #Create particles along the skewer and assign dT to the particles
            particle_pos = np.linspace(0, box_depth,
                                       grid_depth * num_particles)
            for n in range(num_particles):
                particle_dT[n::num_particles] = dT_skewer / float(
                    num_particles)

            #Calculate LOS velocity for each particle
            dr_skewer_pad = get_skewer(dr, i, j)
            dr_skewer_pad = np.insert(dr_skewer_pad, 0, dr_skewer_pad[-1])
            dr_skewer = get_interpolated_array(dr_skewer_pad,
                                               len(particle_pos), 'linear')

            #Apply velocity shift
            particle_pos += dr_skewer

            #Periodic boundary conditions
            if periodic:
                particle_pos[np.where(particle_pos < 0)] += box_depth
                particle_pos[np.where(particle_pos > box_depth)] -= box_depth

            #Regrid particles to original resolution
            dist_skewer = np.histogram(particle_pos, \
                                       bins=np.linspace(0, box_depth, grid_depth+1), \
                                       weights=particle_dT)[0]
            if los_axis == 0:
                distbox[:, i, j] = dist_skewer
            elif los_axis == 1:
                distbox[i, :, j] = dist_skewer
            else:
                distbox[i, j, :] = dist_skewer

    print_msg('Old dT (mean,var): %.3f, %.3f' %
              (dT.astype('float64').mean(), dT.var()))
    print_msg('New dT (mean,var): %.3f, %.3f' %
              (distbox.mean(), distbox.var()))
    return distbox
Example #29
    def read_from_file(self,
                       filename,
                       min_select_mass=0.0,
                       max_select_mass=None,
                       max_select_number=-1,
                       startline=0):
        '''
		Read a halo list.
		
		Parameters:
			* filename (string): The file to read from
			* min_select_mass = 0.0 (float): The lower threshold mass in solar masses.
				Only halos above this mass will be read.
			* max_select_mass = None (float): The upper threshold mass in solar masses.
				Only halos below this mass will be read. If None, there is no limit.
			* max_select_number = -1 (int): The max number of halos to read. If -1, there
				is no limit.
			* startline = 0 (int): The line in the file where reading will start.
		Returns:
			True if all the halos were read. False otherwise.
		'''

        self.halos = []

        print_msg('Reading halo file %s...' % filename)
        self.filename = filename
        import fileinput

        #Store the redshift from the filename
        import os.path
        name = os.path.split(filename)[1]
        self.z = float(name.split('halo')[0])

        #Read the file line by line, since it's large
        linenumber = 1
        min_select_grid_mass = min_select_mass / (conv.M_grid *
                                                  const.solar_masses_per_gram)
        if max_select_mass:
            print_msg('Max_select_mass: %g' % max_select_mass)
            max_select_grid_mass = max_select_mass / (
                conv.M_grid * const.solar_masses_per_gram)

        for line in fileinput.input(filename):
            if linenumber < startline:  #If you want to read from a particular line
                linenumber += 1
                continue
            if max_select_number >= 0 and len(self.halos) >= max_select_number:
                fileinput.close()
                return False
            if linenumber % 100000 == 0:
                print_msg('Read %d lines' % linenumber)
            linenumber += 1

            vals = line.split()
            grid_mass = float(vals[-3])

            #Create a halo and add it to the list
            if grid_mass > min_select_grid_mass and (
                    max_select_mass == None
                    or grid_mass < max_select_grid_mass):
                halo = Halo()
                halo.pos = np.array(map(float, vals[:3]))
                halo.pos_cm = np.array(map(float, vals[3:6]))
                halo.vel = np.array(map(float, vals[6:9]))
                halo.l = np.array(map(float, vals[9:12]))
                halo.vel_disp = float(vals[12])
                halo.r = float(vals[13])
                halo.m = float(vals[14])
                halo.mp = float(vals[15])
                halo.solar_masses = grid_mass * conv.M_grid * const.solar_masses_per_gram
                self.halos.append(halo)

        fileinput.close()

        return True
Example #30
def freq_box(xfrac_dir, dens_dir, z_low, z_high):
    ''' 
    Make frequency (lightcone) boxes of density, ionized fractions, 
    and brightness temperature. The function reads xfrac and density
    files from the specified directories and combines them into a 
    lightcone box going from z_low to z_high.
    
    This routine is more or less a direct translation of Garrelt's 
    IDL routine.
    
    Parameters: 
        * xfrac_dir (string): directory containing xfrac files
        * dens_dir (string): directory containing density files
        * z_low (float): lowest redshift to include
        * z_high (float): highest redshift to include.

    Returns: 
        Tuple with (xfrac box, density box, dt box, redshifts), where
        xfrac box, density box and dt box are numpy arrays containing
        the lightcone quantities. redshifts is an array containing the 
        redshift for each slice.
        
    .. note::
        Since this function relies on filenames to get redshifts,
        all the data files must follow the common naming conventions.
        Ionization files must be named xfrac3d_z.bin and density files
        zn_all.dat
        
    .. note::
        The make_lightcone method is meant to replace this method. It
        is more general and easier to use.
    
    Example:
        Make a lightcone cube ranging from z = 7 to z = 8:
    
        >>> xfrac_dir = '/path/to/data/xfracs/'
        >>> dens_dir = '/path/to/data/density/'
        >>> xcube, dcube, dtcube, z = c2t.freq_box(xfrac_dir, dens_dir, z_low=7.0, z_high=8.)
        
    '''

    #Get the list of redshifts where we have simulation output files
    dens_redshifts = get_dens_redshifts(dens_dir, z_low)
    mesh_size = get_mesh_size(
        os.path.join(dens_dir, '%.3fn_all.dat' % dens_redshifts[0]))

    #Get the list of redshifts and frequencies that we want for the observational box
    output_z = redshifts_at_equal_comoving_distance(z_low,
                                                    z_high,
                                                    box_grid_n=mesh_size[0])
    output_z = output_z[output_z > dens_redshifts[0]]
    output_z = output_z[output_z < dens_redshifts[-1]]
    if len(output_z) < 1:
        raise Exception('No valid redshifts in range!')

    #Keep track of output simulation files to use
    xfrac_file_low = XfracFile()
    xfrac_file_high = XfracFile()
    dens_file_low = DensityFile()
    dens_file_high = DensityFile()
    z_bracket_low = None
    z_bracket_high = None

    #The current position in comoving coordinates
    comoving_pos_idx = 0

    #Build the cube
    xfrac_lightcone = np.zeros((mesh_size[0], mesh_size[1], len(output_z)))
    dens_lightcone = np.zeros_like(xfrac_lightcone)
    dt_lightcone = np.zeros_like(xfrac_lightcone)

    for z in output_z:
        #Find the output files that bracket the redshift
        z_bracket_low_new = dens_redshifts[dens_redshifts <= z][0]
        z_bracket_high_new = dens_redshifts[dens_redshifts >= z][0]

        if z_bracket_low_new != z_bracket_low:
            z_bracket_low = z_bracket_low_new
            xfrac_file_low = XfracFile(
                os.path.join(xfrac_dir, 'xfrac3d_%.3f.bin' % z_bracket_low))
            dens_file_low = DensityFile(
                os.path.join(dens_dir, '%.3fn_all.dat' % z_bracket_low))
            dt_cube_low = calc_dt(xfrac_file_low, dens_file_low)

        if z_bracket_high_new != z_bracket_high:
            z_bracket_high = z_bracket_high_new
            xfrac_file_high = XfracFile(
                os.path.join(xfrac_dir, 'xfrac3d_%.3f.bin' % z_bracket_high))
            dens_file_high = DensityFile(
                os.path.join(dens_dir, '%.3fn_all.dat' % z_bracket_high))
            dt_cube_high = calc_dt(xfrac_file_high, dens_file_high)

        slice_ind = comoving_pos_idx % xfrac_file_high.mesh_x

        #Ionized fraction
        xi_interp = _get_interp_slice(xfrac_file_high.xi, xfrac_file_low.xi, z_bracket_high, \
                                    z_bracket_low, z, comoving_pos_idx)
        xfrac_lightcone[:, :, comoving_pos_idx] = xi_interp

        #Density
        rho_interp = _get_interp_slice(dens_file_high.cgs_density, dens_file_low.cgs_density, z_bracket_high, \
                                    z_bracket_low, z, comoving_pos_idx)
        dens_lightcone[:, :, comoving_pos_idx] = rho_interp

        #Brightness temperature
        dt_interp = _get_interp_slice(dt_cube_high, dt_cube_low, z_bracket_high, \
                                    z_bracket_low, z, comoving_pos_idx)
        dt_lightcone[:, :, comoving_pos_idx] = dt_interp

        print_msg('Slice %d of %d' % (comoving_pos_idx, len(output_z)))
        comoving_pos_idx += 1

    return xfrac_lightcone, dens_lightcone, dt_lightcone, output_z
Example #31
def make_lightcone(filenames, z_low = None, z_high = None, file_redshifts = None, \
                cbin_bits = 32, cbin_order = 'c', los_axis = 0, raw_density = False, interpolation='linear'):
    '''
    Make a lightcone from xfrac, density or dT data. Replaces freq_box.
    
    Parameters:
        * filenames (string or array): The coeval cubes. 
            Can be either any of the following:
            
                - An array with the file names
                
                - A text file containing the file names
                
                - The directory containing the files (must only contain 
                one type of files)
        * z_low (float): the lowest redshift. If not given, the redshift of the 
            lowest-z coeval cube is used.
        * z_high (float): the highest redshift. If not given, the redshift of the 
            highest-z coeval cube is used.
        * file_redshifts (string or array): The redshifts of the coeval cubes.
            Can be any of the following types:
            
            - None: determine the redshifts from file names
             
            - array: array containing the redshift of each coeval cube
            
            - filename: the name of a data file to read the redshifts from
            
        * cbin_bits (int): If the data files are in cbin format, you may specify 
            the number of bits.
        * cbin_order (char): If the data files are in cbin format, you may specify 
            the order of the data.
        * los_axis (int): the axis to use as line-of-sight for the coeval cubes
        * raw_density (bool): if this is true, and the data is a 
            density file, the raw (simulation units) density will be returned
            instead of the density in cgs units
        * interpolation (string): can be 'linear', 'step', 'sigmoid' or
            'step_cell'. 
            Determines how slices in between output redshifts are interpolated.
    Returns:
        (lightcone, z) tuple
        
        lightcone is the lightcone volume where the first two axes
        have the same size as the input cubes
        
        z is an array containing the redshifts along the line-of-sight
        
    .. note::
        If z_low is given, that redshift will be the lowest one included,
        even if there is no coeval box at exactly that redshift. This can 
        give results that are subtly different from results calculated with
        the old freq_box routine.
    '''

    if not interpolation in ['linear', 'step', 'sigmoid', 'step_cell']:
        raise ValueError('Unknown interpolation type: %s' % interpolation)

    #Figure out output redshifts, file names and size of output
    filenames = _get_filenames(filenames)
    file_redshifts = _get_file_redshifts(file_redshifts, filenames)
    assert len(file_redshifts) == len(filenames)
    mesh_size = get_mesh_size(filenames[0])

    output_z = _get_output_z(file_redshifts, z_low, z_high, mesh_size[0])

    #Make the output 32-bit to save memory
    lightcone = np.zeros((mesh_size[0], mesh_size[1], len(output_z)),
                         dtype='float32')

    comoving_pos_idx = 0
    z_bracket_low = None
    z_bracket_high = None
    data_low = None
    data_high = None

    #Make the lightcone, one slice at a time
    print_msg('Making lightcone between %f < z < %f' %
              (output_z.min(), output_z.max()))
    for z in output_z:
        z_bracket_low_new = file_redshifts[file_redshifts <= z].max()
        z_bracket_high_new = file_redshifts[file_redshifts > z].min()

        #Do we need a new file for the low z?
        if z_bracket_low_new != z_bracket_low:
            z_bracket_low = z_bracket_low_new
            file_idx = np.argmin(np.abs(file_redshifts - z_bracket_low))
            if data_high is None:
                data_low, datatype = get_data_and_type(filenames[file_idx],
                                                       cbin_bits, cbin_order,
                                                       raw_density)
            else:  #No need to read the file again
                data_low = data_high

        #Do we need a new file for the high z?
        if z_bracket_high_new != z_bracket_high:
            z_bracket_high = z_bracket_high_new
            file_idx = np.argmin(np.abs(file_redshifts - z_bracket_high))
            data_high, datatype = get_data_and_type(filenames[file_idx],
                                                    cbin_bits, cbin_order,
                                                    raw_density)

        #Make the slice by interpolating, then move to next index
        data_interp = _get_interp_slice(data_high, data_low, z_bracket_high, \
                                    z_bracket_low, z, comoving_pos_idx, los_axis, interpolation)
        lightcone[:, :, comoving_pos_idx] = data_interp

        comoving_pos_idx += 1

    return lightcone, output_z
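
A hedged usage sketch in the style of the examples above; the import alias c2t, the data directory and the redshift range are illustrative assumptions rather than part of the routine itself:

>>> import c2raytools as c2t
>>> lightcone, z = c2t.make_lightcone('/path/to/data/xfracs/',
...                                   z_low=7.0, z_high=9.0,
...                                   los_axis=0, interpolation='linear')
>>> lightcone.shape[2] == len(z)  #the line-of-sight is always the last axis
True

As the comment in the code notes, the output is stored as float32 to save memory, which matters for long lightcones.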
Beispiel #32
0
def freq_box(xfrac_dir, dens_dir, z_low, z_high):
    """ 
    Make frequency (lightcone) boxes of density, ionized fractions, 
    and brightness temperature. The function reads xfrac and density
    files from the specified directories and combines them into a 
    lightcone box going from z_low to z_high.
    
    This routine is more or less a direct translation of Garrelt's 
    IDL routine.
    
    Parameters: 
        * xfrac_dir (string): directory containing xfrac files
        * dens_dir (string): directory containing density files
        * z_low (float): lowest redshift to include
        * z_high (float): highest redshift to include.

    Returns: 
        Tuple with (xfrac box, density box, dt box, redshifts), where
        xfrac box, density box and dt box are numpy arrays containing
        the lightcone quantities. redshifts is an array containing the 
        redshift for each slice.
        
    .. note::
        Since this function relies on filenames to get redshifts,
        all the data files must follow the common naming conventions.
        Ionization files must be named xfrac3d_<z>.bin and density files
        <z>n_all.dat, where <z> is the redshift.
        
    .. note::
        The make_lightcone method is meant to replace this method. It
        is more general and easier to use.
    
    Example:
        Make a lightcone cube ranging from z = 7 to z = 8:
    
        >>> xfrac_dir = '/path/to/data/xfracs/'
        >>> dens_dir = '/path/to/data/density/'
        >>> xcube, dcube, dtcube, z = c2t.freq_box(xfrac_dir, dens_dir, z_low=7.0, z_high=8.)
        
    """

    # Get the list of redshifts where we have simulation output files
    dens_redshifts = get_dens_redshifts(dens_dir, z_low)
    mesh_size = get_mesh_size(os.path.join(dens_dir, "%.3fn_all.dat" % dens_redshifts[0]))

    # Get the list of redshifts and frequencies that we want for the observational box
    output_z = redshifts_at_equal_comoving_distance(z_low, z_high, box_grid_n=mesh_size[0])
    output_z = output_z[output_z > dens_redshifts[0]]
    output_z = output_z[output_z < dens_redshifts[-1]]
    if len(output_z) < 1:
        raise Exception("No valid redshifts in range!")

    # Keep track of output simulation files to use
    xfrac_file_low = XfracFile()
    xfrac_file_high = XfracFile()
    dens_file_low = DensityFile()
    dens_file_high = DensityFile()
    z_bracket_low = None
    z_bracket_high = None

    # The current position in comoving coordinates
    comoving_pos_idx = 0

    # Build the cube
    xfrac_lightcone = np.zeros((mesh_size[0], mesh_size[1], len(output_z)))
    dens_lightcone = np.zeros_like(xfrac_lightcone)
    dt_lightcone = np.zeros_like(xfrac_lightcone)

    for z in output_z:
        # Find the output files that bracket the redshift
        z_bracket_low_new = dens_redshifts[dens_redshifts <= z][0]
        z_bracket_high_new = dens_redshifts[dens_redshifts >= z][0]

        if z_bracket_low_new != z_bracket_low:
            z_bracket_low = z_bracket_low_new
            xfrac_file_low = XfracFile(os.path.join(xfrac_dir, "xfrac3d_%.3f.bin" % z_bracket_low))
            dens_file_low = DensityFile(os.path.join(dens_dir, "%.3fn_all.dat" % z_bracket_low))
            dt_cube_low = calc_dt(xfrac_file_low, dens_file_low)

        if z_bracket_high_new != z_bracket_high:
            z_bracket_high = z_bracket_high_new
            xfrac_file_high = XfracFile(os.path.join(xfrac_dir, "xfrac3d_%.3f.bin" % z_bracket_high))
            dens_file_high = DensityFile(os.path.join(dens_dir, "%.3fn_all.dat" % z_bracket_high))
            dt_cube_high = calc_dt(xfrac_file_high, dens_file_high)

        slice_ind = comoving_pos_idx % xfrac_file_high.mesh_x

        # Ionized fraction
        xi_interp = _get_interp_slice(
            xfrac_file_high.xi, xfrac_file_low.xi, z_bracket_high, z_bracket_low, z, comoving_pos_idx
        )
        xfrac_lightcone[:, :, comoving_pos_idx] = xi_interp

        # Density
        rho_interp = _get_interp_slice(
            dens_file_high.cgs_density, dens_file_low.cgs_density, z_bracket_high, z_bracket_low, z, comoving_pos_idx
        )
        dens_lightcone[:, :, comoving_pos_idx] = rho_interp

        # Brightness temperature
        dt_interp = _get_interp_slice(dt_cube_high, dt_cube_low, z_bracket_high, z_bracket_low, z, comoving_pos_idx)
        dt_lightcone[:, :, comoving_pos_idx] = dt_interp

        print_msg("Slice %d of %d" % (comoving_pos_idx, len(output_z)))
        comoving_pos_idx += 1

    return xfrac_lightcone, dens_lightcone, dt_lightcone, output_z
Beispiel #33
0
def power_spectrum_1d_cic(input_array_nd,
                          box_dims,
                          kbins=100,
                          return_n_modes=False,
                          return_kernel=False):
    ''' Calculate the spherically averaged power spectrum of an array
    and return it as a one-dimensional array. Performs a deconvolution with
    CIC kernel

    Parameters:
            * input_array_nd (numpy array): the data array
            * kbins = 100 (integer or array-like): The number of bins,
                    or a list containing the bin edges. If an integer is given, the bins
                    are logarithmically spaced.
            * box_dims = None (float or array-like): the dimensions of the
                    box. If this is None, the current box volume is used along all
                    dimensions. If it is a float, this is taken as the box length
                    along all dimensions. If it is an array-like, the elements are
                    taken as the box length along each axis.
            * return_n_modes = False (bool): if true, also return the
                    number of modes in each bin
            * return_kernel = False (bool): if true, also return the
                    CIC deconvolution kernel

    Returns:
            A tuple with (Pk, bins), where Pk is an array with the
            power spectrum and bins is an array with the k bin centers.
            If return_n_modes and/or return_kernel is true, the number
            of modes per bin and the kernel are appended to the tuple.
    '''
    def _radial_average(input_array, k, box_dims, kbins=10):
        '''
        Radially average data. Mostly for internal use.

        Parameters: 
                * input_array (numpy array): the data array
                * box_dims = None (float or array-like): the dimensions of the 
                        box. If this is None, the current box volume is used along all
                        dimensions. If it is a float, this is taken as the box length
                        along all dimensions. If it is an array-like, the elements are
                        taken as the box length along each axis.
                * kbins = 10 (integer or array-like): The number of bins,
                        or a list containing the bin edges. If an integer is given, the bins
                        are logarithmically spaced.

        Returns:
                A tuple with (data, bins, n_modes), where data is an array with the 
                averaged data, bins is an array with the bin centers and n_modes is the 
                number of modes in each bin

        '''

        kbins = _get_kbins(kbins, box_dims, k)

        # Bin the data
        print_msg('Binning data...')
        dk = (kbins[1:] - kbins[:-1]) / 2.
        # Total power in each bin
        outdata = np.histogram(k.flatten(),
                               bins=kbins,
                               weights=input_array.flatten())[0]
        # Number of modes in each bin
        n_modes = np.histogram(k.flatten(), bins=kbins)[0].astype('float')
        outdata /= n_modes

        return outdata, kbins[:-1] + dk, n_modes

    import _power_spectrum
    N = input_array_nd.shape[0]

    print_msg("Computing CIC kernel...")
    W = _power_spectrum.cic_window_function(N)
    print_msg("Done")

    boxsize = box_dims[0]
    boxvol = np.product(map(float, box_dims))
    pixelsize = boxvol / (np.product(input_array_nd.shape))

    print_msg("Sampling fourier modes...")
    kk = _power_spectrum.fft_sample_spacing(N, boxsize)
    print_msg("Done")

    # Compute power spectrum
    print_msg("Computing power spectrum...")
    input_array = fft.fftn(input_array_nd)
    input_array = np.abs(input_array)**2
    input_array /= W  # deconvolve with kernel

    # Account for grid spacing and boxsize
    input_array *= pixelsize**2 / boxvol
    print_msg("Done")

    # Spherically average
    ps, kbins, nmodes = _radial_average(input_array, kk, box_dims, kbins=kbins)

    # Return
    if return_n_modes:
        if return_kernel:
            return ps, kbins, nmodes, W
        return ps, kbins, nmodes
    if return_kernel:
        return ps, kbins, W
    return ps, kbins
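
The compiled helper _power_spectrum.cic_window_function is not reproduced in this listing. Purely as a point of reference, below is a minimal pure-numpy sketch of the standard cloud-in-cell Fourier-space window on an unshifted N^3 FFT grid; whether the compiled routine returns exactly this window or its square (the spectrum above is divided by W once) is an assumption to be checked against the extension module.

import numpy as np

def cic_window_sketch(N):
    '''Illustrative stand-in, not the library routine: the CIC assignment
    kernel transforms to sinc(f)**2 along each axis, where f are the
    per-axis frequencies in cycles per cell as given by np.fft.fftfreq.'''
    f = np.fft.fftfreq(N)                 #frequencies in [-0.5, 0.5)
    w1d = np.sinc(f)**2                   #np.sinc(x) = sin(pi*x)/(pi*x)
    #Outer product over the three axes gives the separable 3D window
    return w1d[:, None, None] * w1d[None, :, None] * w1d[None, None, :]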
Beispiel #34
0
def mu_binning(powerspectrum, los_axis=0, mubins=20, kbins=10, box_dims=None, weights=None):
    """
	Bin a power spectrum in mu and k. For internal use
	"""

    if weights is not None:
        powerspectrum *= weights

    dim = len(powerspectrum.shape)
    assert dim == 3

    x, y, z = np.indices(powerspectrum.shape)
    center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0, (z.max() - z.min()) / 2.0])
    kx = 2.0 * np.pi * (x - center[0]) / box_dims[0]
    ky = 2.0 * np.pi * (y - center[1]) / box_dims[1]
    kz = 2.0 * np.pi * (z - center[2]) / box_dims[2]
    k = np.sqrt(kx ** 2 + ky ** 2 + kz ** 2)

    # Line-of-sight distance from center
    if los_axis == 0:
        los_dist = kx
    elif los_axis == 1:
        los_dist = ky
    elif los_axis == 2:
        los_dist = kz
    else:
        raise ValueError("los_axis must be 0, 1 or 2 (got %d)" % los_axis)

    # mu = cos(theta) = k_par/k
    mu = los_dist / np.abs(k)
    mu[np.where(k < 0.001)] = np.nan

    # Calculate k values, and make bins
    if isinstance(kbins, int):
        kbins = 10 ** np.linspace(np.log10(k.min()), np.log10(k.max()), kbins + 1)
    dk = (kbins[1:] - kbins[:-1]) / 2.0
    n_kbins = len(kbins) - 1

    # Exclude the k_x = 0, k_y = 0, k_z = 0 modes
    zero_ind = (x == k.shape[0] / 2) + (y == k.shape[1] / 2) + (z == k.shape[2] / 2)
    powerspectrum[zero_ind] = 0.0
    # k[zero_ind] = -1.

    # Make mu bins
    if isinstance(mubins, int):
        mubins = np.linspace(-1.0, 1.0, mubins + 1)
    dmu = (mubins[1:] - mubins[:-1]) / 2.0
    n_mubins = len(mubins) - 1

    # Remove the zero component from the power spectrum. mu is undefined here
    powerspectrum[tuple(np.array(powerspectrum.shape) / 2)] = 0.0

    # Bin the data
    utils.print_msg("Binning data...")
    outdata = np.zeros((n_mubins, n_kbins))
    for ki in range(n_kbins):
        kmin = kbins[ki]
        kmax = kbins[ki + 1]
        kidx = (k >= kmin) * (k < kmax)
        for i in range(n_mubins):
            mu_min = mubins[i]
            mu_max = mubins[i + 1]
            idx = (mu >= mu_min) * (mu < mu_max) * kidx
            outdata[i, ki] = np.mean(powerspectrum[idx])

            if weights is not None:
                outdata[i, ki] /= weights[idx].mean()

    return outdata, mubins[:-1] + dmu, kbins[:-1] + dk
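
A hedged usage sketch: mu_binning expects a full three-dimensional power spectrum grid (for instance from power_spectrum_nd, which returns the fftshifted spectrum that this routine's centred k-grid assumes) together with the box length along each axis. The box size below is illustrative.

>>> box_dims = [425.0, 425.0, 425.0]  #cMpc, illustrative
>>> ps3d = power_spectrum_nd(dT, box_dims=box_dims)
>>> P_muk, mu_centers, k_centers = mu_binning(ps3d, los_axis=0,
...                                           mubins=20, kbins=15,
...                                           box_dims=box_dims)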
Beispiel #35
0
def get_distorted_dt(dT, kms, redsh, los_axis=0, num_particles=10):
	''' 
	Apply peculiar velocity distortions to a differential
	temperature box, using the Mesh-Particle-Mesh method,
	as described in http://arxiv.org/abs/1303.5627
	
	Parameters:
		* dT (numpy array): the differential temperature box
		* kms (numpy array): velocity in km/s, array of dimensions 
			(3,mx,my,mz) where (mx,my,mz) is dimensions of dT
		* redsh (float): the redshift
		* los_axis = 0 (int): the line-of-sight axis (must be 0, 1 or 2)
		* num_particles = 10 (int): the number of particles to use per cell
			A higher number gives better accuracy, but worse performance.
		
	Returns:
		The redshift space box as a numpy array with same dimensions as dT.
		
	Example:
		Read a density file, a velocity file and an xfrac file, calculate the 
		brightness temperature, and convert it to redshift space.
		
		>>> vfile = c2t.VelocityFile('/path/to/data/8.515v_all.dat')
		>>> dfile = c2t.DensityFile('/path/to/data/8.515n_all.dat')
		>>> xfile = c2t.XfracFile('/path/to/data/xfrac3d_8.515.bin')
		>>> dT = c2t.calc_dt(xfile, dfile)
		>>> kms = vfile.get_kms_from_density(dfile)
		>>> dT_zspace = get_distorted_dt(dT, kms, dfile.z, los_axis = 0)
	'''


	#Take care of different LOS axes
	assert (los_axis == 0 or los_axis == 1 or los_axis == 2)
	if los_axis == 0:
		get_slice = lambda data, i, j : data[:,i,j]
	elif los_axis == 1:
		get_slice = lambda data, i, j : data[i,:,j]
	else:
		get_slice = lambda data, i, j : data[i,j,:]

	#Dimensions
	mx,my,mz = dT.shape

	print_msg('Making velocity-distorted box...')
	print_msg('The redshift is %.3f' % redsh)
	print_msg('The box size is %.3f cMpc' % conv.LB)
	
	#Figure out the apparent position shift 
	vpar = kms[los_axis,:,:,:]
	z_obs= (1+redsh)/(1.-vpar/const.c)-1.
	dr = (1.+z_obs)*kms[los_axis,:,:,:]/const.Hz(z_obs)

	#Make the distorted box
	distbox = np.zeros((mx,my,mz))
	part_dT = np.zeros(mx*num_particles)

	last_percent = 0
	for i in range(my):
		percent_done = int(float(i)/float(my)*100)
		if percent_done%10 == 0 and percent_done != last_percent:
			print_msg('%d %%' % percent_done)
			last_percent = percent_done
		for j in range(mz):

			#Take a 1D slice from the dT box
			dT_slice = get_slice(dT,i,j)

			#Divide slice into particles
			partpos = np.linspace(0,conv.LB,mx*num_particles) #Positions before dist.
			for n in range(num_particles): #Assign dT to particles
				part_dT[n::num_particles] = dT_slice/float(num_particles)

			#Calculate and apply redshift distortions
			cell_length = conv.LB/float(mx)
			dr_slice_pad = get_slice(dr,i,j)
			#np.insert returns a new array; keep the result so the pad is applied
			dr_slice_pad = np.insert(dr_slice_pad, 0, dr_slice_pad[-1])
			dr_slice = get_interpolated_array(dr_slice_pad, len(partpos), 'linear')
			dr_slice = np.roll(dr_slice, num_particles//2)
			partpos += dr_slice

			#Boundary conditions
			partpos[np.where(partpos < 0)] += conv.LB
			partpos[np.where(partpos > conv.LB)] -= conv.LB

			#Regrid particles
			dist_slice = np.histogram(partpos, bins=np.linspace(0,conv.LB,mx+1), weights = part_dT)[0]
			if los_axis == 0:
				distbox[:,i,j] = dist_slice
			elif los_axis == 1:
				distbox[i,:,j] = dist_slice
			else:
				distbox[i,j,:] = dist_slice

	print_msg('Old dT (mean,var): %.3f, %.3f' % (dT.mean(), dT.var()))
	print_msg('New dT (mean,var): %.3f, %.3f' % (distbox.mean(), distbox.var()))
	return distbox
Beispiel #36
0
def get_distorted_dt(dT, kms, redsh, los_axis=0, num_particles=10):
    ''' 
	Apply peculiar velocity distortions to a differential
	temperature box, using the Mesh-Particle-Mesh method,
	as described in http://arxiv.org/abs/1303.5627
	
	Parameters:
		* dT (numpy array): the differential temperature box
		* kms (numpy array): velocity in km/s, array of dimensions 
			(3,mx,my,mz) where (mx,my,mz) is dimensions of dT
		* redsh (float): the redshift
		* los_axis = 0 (int): the line-of-sight axis (must be 0, 1 or 2)
		* num_particles = 10 (int): the number of particles to use per cell
			A higher number gives better accuracy, but worse performance.
		
	Returns:
		The redshift space box as a numpy array with same dimensions as dT.
		
	Example:
		Read a density file, a velocity file and an xfrac file, calculate the 
		brightness temperature, and convert it to redshift space.
		
		>>> vfile = c2t.VelocityFile('/path/to/data/8.515v_all.dat')
		>>> dfile = c2t.DensityFile('/path/to/data/8.515n_all.dat')
		>>> xfile = c2t.XfracFile('/path/to/data/xfrac3d_8.515.bin')
		>>> dT = c2t.calc_dt(xfile, dfile)
		>>> kms = vfile.get_kms_from_density(dfile)
		>>> dT_zspace = get_distorted_dt(dT, kms, dfile.z, los_axis = 0)
	'''

    #Take care of different LOS axes
    assert (los_axis == 0 or los_axis == 1 or los_axis == 2)
    if los_axis == 0:
        get_slice = lambda data, i, j: data[:, i, j]
    elif los_axis == 1:
        get_slice = lambda data, i, j: data[i, :, j]
    else:
        get_slice = lambda data, i, j: data[i, j, :]

    #Dimensions
    mx, my, mz = dT.shape

    print_msg('Making velocity-distorted box...')
    print_msg('The redshift is %.3f' % redsh)
    print_msg('The box size is %.3f cMpc' % conv.LB)

    #Figure out the apparent position shift
    vpar = kms[los_axis, :, :, :]
    z_obs = (1 + redsh) / (1. - vpar / const.c) - 1.
    dr = (1. + z_obs) * kms[los_axis, :, :, :] / const.Hz(z_obs)

    #Make the distorted box
    distbox = np.zeros((mx, my, mz))
    part_dT = np.zeros(mx * num_particles)

    last_percent = 0
    for i in range(my):
        percent_done = int(float(i) / float(my) * 100)
        if percent_done % 10 == 0 and percent_done != last_percent:
            print_msg('%d %%' % percent_done)
            last_percent = percent_done
        for j in range(mz):

            #Take a 1D slice from the dT box
            dT_slice = get_slice(dT, i, j)

            #Divide slice into particles
            partpos = np.linspace(0, conv.LB,
                                  mx * num_particles)  #Positions before dist.
            for n in range(num_particles):  #Assign dT to particles
                part_dT[n::num_particles] = dT_slice / float(num_particles)

            #Calculate and apply redshift distortions
            cell_length = conv.LB / float(mx)
            dr_slice_pad = get_slice(dr, i, j)
            #np.insert returns a new array; keep the result so the pad is applied
            dr_slice_pad = np.insert(dr_slice_pad, 0, dr_slice_pad[-1])
            dr_slice = get_interpolated_array(dr_slice_pad, len(partpos),
                                              'linear')
            dr_slice = np.roll(dr_slice, num_particles//2)
            partpos += dr_slice

            #Boundary conditions
            partpos[np.where(partpos < 0)] += conv.LB
            partpos[np.where(partpos > conv.LB)] -= conv.LB

            #Regrid particles
            dist_slice = np.histogram(partpos,
                                      bins=np.linspace(0, conv.LB, mx + 1),
                                      weights=part_dT)[0]
            if los_axis == 0:
                distbox[:, i, j] = dist_slice
            elif los_axis == 1:
                distbox[i, :, j] = dist_slice
            else:
                distbox[i, j, :] = dist_slice

    print_msg('Old dT (mean,var): %.3f, %.3f' % (dT.mean(), dT.var()))
    print_msg('New dT (mean,var): %.3f, %.3f' % (distbox.mean(), distbox.var()))
    return distbox
Beispiel #37
0
	def read_from_file(self,filename, min_select_mass = 0.0, max_select_mass = None, max_select_number=-1, 
			startline=0):
		'''
		Read a halo list.
		
		Parameters:
			* filename (string): The file to read from
			* min_select_mass = 0.0 (float): The lower threshold mass in solar masses.
				Only halos above this mass will be read.
			* max_select_mass = None (float): The upper threshold mass in solar masses.
				Only halos below this mass will be read. If None, there is no limit.
			* max_select_number = -1 (int): The max number of halos to read. If -1, there
				is no limit.
			* startline = 0 (int): The line in the file where reading will start.
		Returns:
			True if all the halos were read. False otherwise.
		'''

		self.halos = []

		print_msg('Reading halo file %s...' % filename)
		self.filename = filename
		import fileinput

		#Store the redshift from the filename
		import os.path
		name = os.path.split(filename)[1]
		self.z = float(name.split('halo')[0])

		#Read the file line by line, since it's large
		linenumber = 1
		min_select_grid_mass = min_select_mass/(conv.M_grid*const.solar_masses_per_gram)
		if max_select_mass:
			print_msg('Max_select_mass: %g' % max_select_mass)
			max_select_grid_mass = max_select_mass/(conv.M_grid*const.solar_masses_per_gram)

		for line in fileinput.input(filename):
			if linenumber < startline: #If you want to read from a particular line
				linenumber += 1
				continue
			if max_select_number >= 0 and len(self.halos) >= max_select_number:
				fileinput.close()
				return False
			if linenumber % 100000 == 0:
				print_msg('Read %d lines' % linenumber)
			linenumber += 1

			vals = line.split()
			grid_mass = float(vals[-3])

			#Create a halo and add it to the list
			if grid_mass > min_select_grid_mass and (max_select_mass is None or grid_mass < max_select_grid_mass):
				halo = Halo()
				halo.pos = np.array(map(float, vals[:3]))
				halo.pos_cm = np.array(map(float, vals[3:6]))
				halo.vel = np.array(map(float, vals[6:9]))
				halo.l = np.array(map(float, vals[9:12]))
				halo.vel_disp = float(vals[12])
				halo.r = float(vals[13])
				halo.m = float(vals[14])
				halo.mp = float(vals[15])
				halo.solar_masses = grid_mass*conv.M_grid*const.solar_masses_per_gram
				self.halos.append(halo)

		fileinput.close()

		return True
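
A hedged usage sketch, assuming the method belongs to a halo-list class exposed as c2t.HaloList and that the file follows the <z>halo.dat naming the method relies on to extract the redshift; the path and mass cut are illustrative.

>>> halos = c2t.HaloList()
>>> all_read = halos.read_from_file('/path/to/data/8.515halo.dat',
...                                 min_select_mass=1.e9)
>>> masses = np.array([h.solar_masses for h in halos.halos])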
Beispiel #38
0
def make_lightcone(filenames, z_low = None, z_high = None, file_redshifts = None, \
                cbin_bits = 32, cbin_order = 'c', los_axis = 0, raw_density = False, interpolation='linear'):
    '''
    Make a lightcone from xfrac, density or dT data. Replaces freq_box.
    
    Parameters:
        * filenames (string or array): The coeval cubes. 
            Can be either any of the following:
            
                - An array with the file names
                
                - A text file containing the file names
                
                - The directory containing the files (must only contain 
                one type of files)
        * z_low (float): the lowest redshift. If not given, the redshift of the 
            lowest-z coeval cube is used.
        * z_high (float): the highest redshift. If not given, the redshift of the 
            highest-z coeval cube is used.
        * file_redshifts (string or array): The redshifts of the coeval cubes.
            Can be any of the following types:
            
            - None: determine the redshifts from file names
             
            - array: array containing the redshift of each coeval cube
            
            - filename: the name of a data file to read the redshifts from
            
        * cbin_bits (int): If the data files are in cbin format, you may specify 
            the number of bits.
        * cbin_order (char): If the data files are in cbin format, you may specify 
            the order of the data.
        * los_axis (int): the axis to use as line-of-sight for the coeval cubes
        * raw_density (bool): if this is true, and the data is a 
            density file, the raw (simulation units) density will be returned
            instead of the density in cgs units
        * interpolation (string): can be 'linear', 'step', 'sigmoid' or
            'step_cell'. 
            Determines how slices in between output redshifts are interpolated.
    Returns:
        (lightcone, z) tuple
        
        lightcone is the lightcone volume where the first two axes
        have the same size as the input cubes
        
        z is an array containing the redshifts along the line-of-sight
        
    .. note::
        If z_low is given, that redshift will be the lowest one included,
        even if there is no coeval box at exactly that redshift. This can 
        give results that are subtly different from results calculated with
        the old freq_box routine.
    '''
    
    if interpolation not in ['linear', 'step', 'sigmoid', 'step_cell']:
        raise ValueError('Unknown interpolation type: %s' % interpolation)
    
    #Figure out output redshifts, file names and size of output
    filenames = _get_filenames(filenames)
    file_redshifts = _get_file_redshifts(file_redshifts, filenames)
    assert len(file_redshifts) == len(filenames)
    mesh_size = get_mesh_size(filenames[0])
    
    output_z = _get_output_z(file_redshifts, z_low, z_high, mesh_size[0])

    #Make the output 32-bit to save memory 
    lightcone = np.zeros((mesh_size[0], mesh_size[1], len(output_z)), dtype='float32')
    
    comoving_pos_idx = 0
    z_bracket_low = None; z_bracket_high = None
    data_low = None; data_high = None
    
    #Make the lightcone, one slice at a time
    print_msg('Making lightcone between %f < z < %f' % (output_z.min(), output_z.max()))
    for z in output_z:
        z_bracket_low_new = file_redshifts[file_redshifts <= z].max()
        z_bracket_high_new = file_redshifts[file_redshifts > z].min()
        
        #Do we need a new file for the low z?
        if z_bracket_low_new != z_bracket_low:
            z_bracket_low = z_bracket_low_new
            file_idx = np.argmin(np.abs(file_redshifts - z_bracket_low))
            if data_high is None:
                data_low, datatype = get_data_and_type(filenames[file_idx], cbin_bits, cbin_order, raw_density)
            else: #No need to read the file again
                data_low = data_high
            
        #Do we need a new file for the high z?
        if z_bracket_high_new != z_bracket_high:
            z_bracket_high = z_bracket_high_new
            file_idx = np.argmin(np.abs(file_redshifts - z_bracket_high))
            data_high, datatype = get_data_and_type(filenames[file_idx], cbin_bits, cbin_order, raw_density)
        
        #Make the slice by interpolating, then move to next index
        data_interp = _get_interp_slice(data_high, data_low, z_bracket_high, \
                                    z_bracket_low, z, comoving_pos_idx, los_axis, interpolation)
        lightcone[:,:,comoving_pos_idx] = data_interp
        
        comoving_pos_idx += 1
        
    return lightcone, output_z
Beispiel #39
0
def get_distorted_dt(dT, kms, redsh, los_axis=0, velocity_axis = 0, num_particles=10, periodic=True):
    ''' 
    Apply peculiar velocity distortions to a differential
    temperature box, using the Mesh-Particle-Mesh method,
    as described in http://arxiv.org/abs/1303.5627
    
    Parameters:
        * dT (numpy array): the differential temperature box
        * kms (numpy array): velocity in km/s, array of dimensions 
            (3,mx,my,mz) where (mx,my,mz) is dimensions of dT
        * redsh (float): the redshift
        * los_axis = 0 (int): the line-of-sight axis of the output volume
            (must be 0, 1 or 2)
        * velocity_axis = 0 (int): the index that indicates los velocity
        * num_particles = 10 (int): the number of particles to use per cell
            A higher number gives better accuracy, but worse performance.
        * periodic = True (bool): whether or not to apply periodic boundary
            conditions along the line-of-sight. If you are making a lightcone
            volume, this should be False.
        
    Returns:
        The redshift space box as a numpy array with same dimensions as dT.
        
    Example:
        Read a density file, a velocity file and an xfrac file, calculate the 
        brightness temperature, and convert it to redshift space.
        
        >>> vfile = c2t.VelocityFile('/path/to/data/8.515v_all.dat')
        >>> dfile = c2t.DensityFile('/path/to/data/8.515n_all.dat')
        >>> xfile = c2t.XfracFile('/path/to/data/xfrac3d_8.515.bin')
        >>> dT = c2t.calc_dt(xfile, dfile)
        >>> kms = vfile.get_kms_from_density(dfile)
        >>> dT_zspace = get_distorted_dt(dT, kms, dfile.z, los_axis = 0)
        
    .. note::
        At the moment, it is a requirement that dimensions perpendicular to
        the line-of-sight are equal. For example, if the box dimensions are
        (mx, my, mz) and the line-of-sight is along the z axis, then mx
        has to be equal to my.
        
    .. note::
        If dT is a lightcone volume, los_axis is not necessarily the
        same as velocity_axis. The lightcone volume methods in c2raytools
        all give output volumes that have the line-of-sight as the last index,
        regardless of the line-of-sight axis. For these volumes, you should
        always use los_axis=2 and set velocity_axis equal to whatever was
        used when producing the real-space lightcones.
    
    '''
    #Volume dimensions
    mx,my,mz = dT.shape
    assert(mx == my or my == mz or mx == mz) #TODO: this should not be a requirement 
    grid_depth = dT.shape[los_axis]
    grid_width = dT.shape[(los_axis+1)%3]
    box_depth = grid_depth * (conv.LB/float(grid_width))

    #Take care of different LOS axes
    assert(los_axis == 0 or los_axis == 1 or los_axis == 2)
    if los_axis == 0:
        get_skewer = lambda data, i, j : data[:,i,j]
    elif los_axis == 1:
        get_skewer = lambda data, i, j : data[i,:,j]
    else:
        get_skewer = lambda data, i, j : data[i,j,:]

    #Input redshift can be a float or an array, but we need an array
    redsh = np.atleast_1d(redsh)

    print_msg('Making velocity-distorted box...')
    print_msg('The (min) redshift is %.3f' % redsh[0])
    print_msg('The box size is %.3f cMpc' % conv.LB)
    
    #Figure out the apparent position shift 
    vpar = kms[velocity_axis,:,:,:]
    z_obs = (1+redsh)/(1.-vpar/const.c)-1.
    dr = (1.+z_obs)*vpar/const.Hz(z_obs)

    #Make the distorted box
    distbox = np.zeros_like(dT)
    particle_dT = np.zeros(grid_depth*num_particles)

    last_percent = 0
    for i in range(grid_width):
        percent_done = int(float(i)/float(grid_width)*100)
        if percent_done%10 == 0 and percent_done != last_percent:
            print_msg('%d %%' % percent_done)
            last_percent = percent_done
        for j in range(grid_width):

            #Take a 1D skewer from the dT box
            dT_skewer = get_skewer(dT,i,j)

            #Create particles along the skewer and assign dT to the particles
            particle_pos = np.linspace(0, box_depth, grid_depth*num_particles)
            for n in range(num_particles): 
                particle_dT[n::num_particles] = dT_skewer/float(num_particles)

            #Calculate LOS velocity for each particle
            dr_skewer_pad = get_skewer(dr,i,j)
            #np.insert returns a new array; keep the result so the pad is applied
            dr_skewer_pad = np.insert(dr_skewer_pad, 0, dr_skewer_pad[-1])
            dr_skewer = get_interpolated_array(dr_skewer_pad, len(particle_pos), 'linear')
            
            #Apply velocity shift
            particle_pos += dr_skewer

            #Periodic boundary conditions
            if periodic:
                particle_pos[np.where(particle_pos < 0)] += box_depth
                particle_pos[np.where(particle_pos > box_depth)] -= box_depth

            #Regrid particles to original resolution
            dist_skewer = np.histogram(particle_pos, \
                                       bins=np.linspace(0, box_depth, grid_depth+1), \
                                       weights=particle_dT)[0]
            if los_axis == 0:
                distbox[:,i,j] = dist_skewer
            elif los_axis == 1:
                distbox[i,:,j] = dist_skewer
            else:
                distbox[i,j,:] = dist_skewer

    print_msg('Old dT (mean,var): %.3f, %.3f' % (dT.astype('float64').mean(), dT.var()))
    print_msg('New dT (mean,var): %.3f, %.3f' % (distbox.mean(), distbox.var()) )
    return distbox
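
Following the note above, a hedged sketch for a lightcone volume rather than a coeval cube; the variable names are illustrative and assume the real-space brightness-temperature lightcone, the velocity lightcone and the redshift array along the line-of-sight were produced consistently.

>>> dT_lc_zspace = get_distorted_dt(dT_lightcone, kms_lightcone, z_lightcone,
...                                 los_axis=2, velocity_axis=0,
...                                 periodic=False)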