Example #1
def convergenceVisualize(cmd_args,collection="c0",smooth=0.5*u.arcmin,fontsize=22):

	#Initialize plot
	fig,ax = plt.subplots(2,2,figsize=(16,16))

	#Load data
	cborn = ConvergenceMap.load(os.path.join(fiducial[collection].getMapSet("kappaBorn").home,"born_z2.00_0001r.fits"))
	cray = ConvergenceMap.load(os.path.join(fiducial[collection].getMapSet("kappa").home,"WLconv_z2.00_0001r.fits"))
	cll = ConvergenceMap.load(os.path.join(fiducial[collection].getMapSet("kappaLL").home,"postBorn2-ll_z2.00_0001r.fits"))
	cgp = ConvergenceMap.load(os.path.join(fiducial[collection].getMapSet("kappaGP").home,"postBorn2-gp_z2.00_0001r.fits"))

	#Smooth
	for c in (cborn,cray,cll,cgp):
		c.smooth(smooth,kind="gaussianFFT",inplace=True)

	#Plot
	cray.visualize(colorbar=True,fig=fig,ax=ax[0,0])
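	#difference between the full ray-traced convergence and the first-order (Born) approximation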
	(cray+cborn*-1).visualize(colorbar=True,fig=fig,ax=ax[0,1])
	cll.visualize(colorbar=True,fig=fig,ax=ax[1,0])
	cgp.visualize(colorbar=True,fig=fig,ax=ax[1,1])

	#Titles
	ax[0,0].set_title(r"$\kappa$",fontsize=fontsize)
	ax[0,1].set_title(r"$\kappa-\kappa^{(1)}$",fontsize=fontsize)
	ax[1,0].set_title(r"$\kappa^{(2-{\rm ll})}$",fontsize=fontsize)
	ax[1,1].set_title(r"$\kappa^{(2-{\rm gp})}$",fontsize=fontsize)

	#Switch off grids
	for i in (0,1):
		for j in (0,1):
			ax[i,j].grid(False) #the b= keyword was removed in newer Matplotlib

	#Save
	fig.tight_layout()
	fig.savefig("{0}/csample.{0}".format(cmd_args.type))
Example #2
    def wl_peak_counts(
        self,
        nbins: int,
        field_conversion: str,
        of: str = "orig",
        limits: Optional[tuple] = None,
    ) -> pd.DataFrame:
        """
        Signal peak counts. This is used commonly used in weak-lensing,
        but it doesn't need to stop there...
        """
        if field_conversion == "normalize":
            _map = self.data[of] - np.mean(self.data[of])
        else:
            _map = self.data[of]

        if limits is None:
            lower_bound = np.percentile(self.data[of],
                                        5)  # np.min(self.data[of])
            upper_bound = np.percentile(self.data[of],
                                        95)  # np.max(self.data[of])
        else:
            lower_bound = min(limits)
            upper_bound = max(limits)

        map_bins = np.arange(lower_bound, upper_bound,
                             (upper_bound - lower_bound) / nbins)
        _map = ConvergenceMap(data=_map, angle=self._opening_angle * un.deg)
        _kappa, _pos = _map.locatePeaks(map_bins)

        _hist, _kappa = np.histogram(_kappa, bins=nbins, density=False)
        _kappa = (_kappa[1:] + _kappa[:-1]) / 2
        peak_counts_dic = {"kappa": _kappa, "counts": _hist}
        peak_counts_df = pd.DataFrame(data=peak_counts_dic)
        return peak_counts_df
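
A hedged usage sketch; sky here is a hypothetical instance of the (unshown) hosting class, with a "kappa" field in data and _opening_angle set:

df = sky.wl_peak_counts(nbins=30, field_conversion="normalize", of="kappa")
print(df.head())  # columns: "kappa" (bin centers) and "counts"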
Example #3
 def from_array(
     cls,
     skymap: Type[SkyArray],
     on: str,
     multipoles: Union[List[float],
                       np.ndarray] = np.arange(200.0, 50000.0, 200.0),
 ) -> "PowerSpectrum2D":
     """
     Args:
     """
     _map = ConvergenceMap(data=skymap.data[on],
                           angle=skymap.opening_angle * un.deg)
     l, P = _map.powerSpectrum(multipoles)
     return cls(l, P)
Example #4
    def from_sky(
        cls,
        skymap: Type[SkyArray],
        on: str,
        bin_dsc: dict,
        kernel_width: float = 5,
        direction: int = 1,
        filters: bool = True,
    ) -> "Dipoles":
        """
        Find peaks on the dipole signal map. It is assumed that the convergence maps
        were created with astrild.rays.visuals.map and filter with:
            I) high-pass II) DGD3 III) low-pass gaussian filters.

        Args:
            kernel_width: Smoothing kernel with [arcmin]
        Returns:
        """
        if filters is True:
            skymap = cls._filter(skymap, kernel_width, direction)

        thresholds = cls._get_convergence_thresholds(
            sky_array=skymap.data[bin_dsc["on"]], nbins=bin_dsc["nbins"])

        _map = ConvergenceMap(data=skymap.data[on],
                              angle=skymap.opening_angle * un.deg)
        deltaT, pos_deg = _map.locatePeaks(thresholds)
        deltaT, pos_deg = cls._remove_peaks_crossing_edge(
            skymap.npix, skymap.opening_angle, kernel_width, deltaT, pos_deg)
        assert len(deltaT) != 0, "No peaks"
        peak_dir = {
            "deltaT": deltaT,
            "x_deg": pos_deg[:, 0],
            "y_deg": pos_deg[:, 1],
        }

        # find significance of peaks
        peak_dir["snr"] = cls._signal_to_noise_ratio(peak_dir["deltaT"],
                                                     _map.data)
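        # convert peak positions from degrees to pixel indices (npix pixels span opening_angle degrees)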
        peak_dir["x_pix"] = np.rint(peak_dir["x_deg"] * skymap.npix /
                                    skymap.opening_angle).astype(int)
        peak_dir["y_pix"] = np.rint(peak_dir["y_deg"] * skymap.npix /
                                    skymap.opening_angle).astype(int)
        peak_df = pd.DataFrame(data=peak_dir)
        # attrs is experimental and may change without warning.
        peak_df.attrs["map_file"] = skymap.map_file
        peak_df.attrs["filters"] = filters
        peak_df.attrs["kernel_width"] = kernel_width
        return cls.from_dataframe(peak_df)
Example #5
def convergence_stats(cmd_args):

    #Plot setup
    fig, ax = plt.subplots()

    #Load the convergence map and smooth on 1 arcmin
    conv = ConvergenceMap.load(os.path.join(dataExtern(), "conv1.fit"))
    conv.smooth(1.0 * u.arcmin, kind="gaussianFFT", inplace=True)

    #Define the significance thresholds (in units of the map standard deviation)
    sigma = np.linspace(-2., 11., 101)

    #Show the peak histogram and the PDF
    conv.peakHistogram(sigma, norm=True, fig=fig, ax=ax)

    ax_right = ax.twinx()
    conv.plotPDF(sigma, norm=True, fig=fig, ax=ax_right, color="red")

    #All PDF quantities are shown in red
    ax_right.spines["right"].set_color("red")
    ax_right.tick_params(axis="y", colors="red")
    ax_right.yaxis.label.set_color("red")

    #Save
    fig.savefig("convergence_stats." + cmd_args.type)
Example #6
def test_power():

	conv_map = ConvergenceMap.load("Data/unmasked.fit")
	mask_profile = Mask.load("Data/mask.fit")

	l_edges = np.arange(200.0,50000.0,200.0)
	
	fig,ax = plt.subplots()

	l,P_original = conv_map.powerSpectrum(l_edges)
	l,P_masked = (conv_map*mask_profile).powerSpectrum(l_edges)
	l,P_mask = mask_profile.powerSpectrum(l_edges)

	ax.plot(l,l*(l+1)*P_original/(2*np.pi),label="Unmasked")
	ax.plot(l,l*(l+1)*P_masked/(2*np.pi),label="Masked")
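	#masking a fraction f of the sky suppresses the power by roughly (1-f)^2; the rescaled curve divides this factor back out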
	ax.plot(l,l*(l+1)*P_masked/(2*np.pi*(1.0 - mask_profile.maskedFraction)**2),label="Re-scaled")
	ax.plot(l,l*(l+1)*P_mask/(2*np.pi),label="Mask")
	ax.set_xscale("log")
	ax.set_yscale("log")
	ax.set_xlabel(r"$l$")
	ax.set_ylabel(r"$l(l+1)P_l/2\pi$")

	ax.legend(loc="lower left")

	plt.savefig("power_mask.png")
	plt.clf()
Example #7
def compute_histograms(map_id,simulation_set,smoothing_scales,index,generator,bin_edges):

	assert len(index.descriptor_list) == len(smoothing_scales)

	z = 1.0

	#Get map name to analyze
	map_name = simulation_set.getNames(z=z,realizations=[map_id])[0]

	#Load the convergence map
	convergence_map = ConvergenceMap.load(map_name)

	#Generate the shape noise map
	noise_map = generator.getShapeNoise(z=z,ngal=15.0*arcmin**-2,seed=map_id)

	#Add the noise
	convergence_map += noise_map

	#Measure the features
	hist_output = np.zeros(index.size)
	for n,descriptor in enumerate(index.descriptor_list):

		logging.debug("Processing {0} x {1}".format(map_name,smoothing_scales[n]))

		smoothed_map = convergence_map.smooth(smoothing_scales[n])
		v,hist_output[descriptor.first:descriptor.last] = smoothed_map.pdf(bin_edges)

	#Return the histograms in array format
	return hist_output
Example #8
def compute_histograms(map_id, simulation_set, smoothing_scales, index,
                       generator, bin_edges):

    assert len(index.descriptor_list) == len(smoothing_scales)

    z = 1.0

    #Get map name to analyze
    map_name = simulation_set.getNames(z=z, realizations=[map_id])[0]

    #Load the convergence map
    convergence_map = ConvergenceMap.load(map_name)

    #Generate the shape noise map
    noise_map = generator.getShapeNoise(z=z,
                                        ngal=15.0 * arcmin**-2,
                                        seed=map_id)

    #Add the noise
    convergence_map += noise_map

    #Measure the features
    hist_output = np.zeros(index.size)
    for n, descriptor in enumerate(index.descriptor_list):

        logging.debug("Processing {0} x {1}".format(map_name,
                                                    smoothing_scales[n]))

        smoothed_map = convergence_map.smooth(smoothing_scales[n])
        v, hist_output[descriptor.first:descriptor.last] = smoothed_map.pdf(
            bin_edges)

    #Return the histograms in array format
    return hist_output
Example #9
def excursion(cmd_args,smooth=0.5*u.arcmin,threshold=0.02,fontsize=22):

	#Set up plot
	fig,ax = plt.subplots(1,2,figsize=(16,8))

	#Load map
	conv = ConvergenceMap.load(os.path.join(fiducial["c0"].getMapSet("kappa").home,"WLconv_z2.00_0001r.fits"))
	conv.smooth(smooth,kind="gaussianFFT",inplace=True)

	#Build excursion set
	exc_data = np.zeros_like(conv.data)
	exc_data[conv.data>threshold] = 1.
	exc = ConvergenceMap(exc_data,angle=conv.side_angle)

	#Define binary colorbar
	cmap = plt.get_cmap("RdBu")
	cmaplist = [ cmap(i) for i in range(cmap.N) ]
	cmap = cmap.from_list("binary map",cmaplist,cmap.N)
	bounds = np.array([0.0,0.5,1.0])
	norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)

	#Plot the two alongside
	conv.visualize(colorbar=True,cbar_label=r"$\kappa$",fig=fig,ax=ax[0])
	exc.visualize(colorbar=True,cmap="binary",norm=norm,fig=fig,ax=ax[1])

	#Overlay boundary on the image
	mask = conv.mask(exc_data.astype(np.int8))
	i,j = np.where(mask.boundary>0)
	scale = conv.resolution.to(u.deg).value
	ax[0].scatter(j*scale,i*scale,color="red",marker=".",s=0.5)
	ax[0].set_xlim(0,conv.side_angle.to(u.deg).value)
	ax[0].set_ylim(0,conv.side_angle.to(u.deg).value)

	#Format right colorbar
	cbar = exc.ax.get_images()[0].colorbar
	cbar.outline.set_linewidth(1)
	cbar.outline.set_edgecolor("black")
	cbar.set_ticks([0,0.25,0.5,0.75,1])
	cbar.ax.set_yticklabels(["",r"$\kappa<\kappa_0$","",r"$\kappa>\kappa_0$",""],rotation=90)

	#Save
	fig.tight_layout()
	fig.savefig("{0}/excursion.{0}".format(cmd_args.type))
Example #10
def test_cross():

	#Load
	conv1 = ConvergenceMap.load("Data/conv1.fit")
	conv2 = ConvergenceMap.load("Data/conv2.fit")

	#Cross
	l,Pl = conv1.cross(conv2,l_edges=l_edges)

	#Visualize
	fig,ax = plt.subplots()
	ax.plot(l,np.abs(l*(l+1)*Pl/(2.0*np.pi)))
	ax.set_xscale("log")
	ax.set_yscale("log")
	ax.set_xlabel(r"$l$")
	ax.set_ylabel(r"$l(l+1)P_l/2\pi$")

	plt.savefig("cross_spectrum.png")
	plt.clf()
Example #11
def test_cross():

    #Load
    conv1 = ConvergenceMap.load("Data/conv1.fit")
    conv2 = ConvergenceMap.load("Data/conv2.fit")

    #Cross
    l, Pl = conv1.cross(conv2, l_edges=l_edges)

    #Visualize
    fig, ax = plt.subplots()
    ax.plot(l, np.abs(l * (l + 1) * Pl / (2.0 * np.pi)))
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_xlabel(r"$l$")
    ax.set_ylabel(r"$l(l+1)P_l/2\pi$")

    plt.savefig("cross_spectrum.png")
    plt.clf()
Example #12
def measure_from_IGS1(filename):

	#Read in the map
	logging.debug("Processing IGS1 map {0}".format(filename))
	conv_map = ConvergenceMap.load(filename)

	#Smooth 1 arcmin
	conv_map.smooth(1.0*arcmin,inplace=True)

	#Measure the moments
	return conv_map.moments(connected=True,dimensionless=True)
Example #13
def measure_from_IGS1(filename):

    #Read in the map
    logging.debug("Processing IGS1 map {0}".format(filename))
    conv_map = ConvergenceMap.load(filename)

    #Smooth 1 arcmin
    conv_map.smooth(1.0 * arcmin, inplace=True)

    #Measure the moments
    return conv_map.moments(connected=True, dimensionless=True)
Example #14
def convergence_measure_all(filename,index,mean_subtract,smoothing_scale=None):

	"""
	Measures all the statistical descriptors of a convergence map as indicated by the index instance
	
	"""

	logging.info("Processing {0}".format(filename))

	#Load the map
	conv_map = ConvergenceMap.load(filename)

	if mean_subtract:
		conv_map.data -= conv_map.mean()

	#Smooth the map maybe
	if smoothing_scale is not None:
		logging.info("Smoothing {0} on {1}".format(filename,smoothing_scale))
		conv_map.smooth(smoothing_scale,kind="gaussianFFT",inplace=True)

	#Allocate memory for observables
	descriptors = index
	observables = np.zeros(descriptors.size)

	#Measure descriptors as directed by input
	for n in range(descriptors.num_descriptors):
		
		if type(descriptors[n]) == PowerSpectrum:
			l,observables[descriptors[n].first:descriptors[n].last] = conv_map.powerSpectrum(descriptors[n].l_edges)
			
		elif type(descriptors[n]) == Moments:
			observables[descriptors[n].first:descriptors[n].last] = conv_map.moments(connected=descriptors[n].connected)
			
		elif type(descriptors[n]) == Peaks:
			v,observables[descriptors[n].first:descriptors[n].last] = conv_map.peakCount(descriptors[n].thresholds,norm=descriptors[n].norm)

		elif type(descriptors[n]) == PDF:
			v,observables[descriptors[n].first:descriptors[n].last] = conv_map.pdf(descriptors[n].thresholds,norm=descriptors[n].norm)
		
		elif type(descriptors[n]) == MinkowskiAll:
			v,V0,V1,V2 = conv_map.minkowskiFunctionals(descriptors[n].thresholds,norm=descriptors[n].norm)
			observables[descriptors[n].first:descriptors[n].last] = np.hstack((V0,V1,V2))
		
		elif type(descriptors[n]) == MinkowskiSingle:
			raise ValueError("Due to computational performance you have to measure all Minkowski functionals at once!")
		
		else:
			
			raise ValueError("Measurement of this descriptor not implemented!!!")

	#Return
	return observables
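
Each descriptor above carries first/last offsets that slice its measurement into one flat observables vector; a minimal sketch of that bookkeeping (the Slot class is a hypothetical stand-in, not the lenstools Indexer API):

import numpy as np
from dataclasses import dataclass

@dataclass
class Slot:
    first: int  # start offset in the flat vector
    last: int   # end offset (exclusive)

# hypothetical layout: 10 power-spectrum bins followed by 15 PDF bins
slots = [Slot(0, 10), Slot(10, 25)]
observables = np.zeros(25)
observables[slots[0].first:slots[0].last] = np.ones(10)  # power spectrum bins
observables[slots[1].first:slots[1].last] = np.ones(15)  # PDF bins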
Example #15
    def find_peaks(
        self,
        on: str,
        field_conversion: str,
        thresholds_dsc: dict,
        snr_sigma: Optional[float] = None,
        save: bool = False,
    ) -> None:
        """
        Find peaks on the convergence map. It is assumed that the convergence
        maps were created with astrild.rays.visuals.map and have appropriate
        smoothing and galaxy shape noise applied.

        Args:
        Returns:
        """
        self.on = on
        if field_conversion == "normalize":
            _map = self.skymap.data[on] - np.mean(self.skymap.data[on])
        else:
            _map = self.skymap.data[on]

        thresholds = self._get_convergence_thresholds(**thresholds_dsc)

        _map = ConvergenceMap(data=_map,
                              angle=self.skymap.opening_angle * un.deg)
        _peaks = {}
        _peaks["kappa"], _peaks["pos"] = _map.locatePeaks(thresholds)
        _peaks["kappa"], _peaks["pos"] = self._remove_peaks_crossing_edge(
            **_peaks)
        assert len(_peaks["kappa"]) != 0, "No peaks"

        # find significance of peaks
        _peaks["snr"] = self._signal_to_noise_ratio(_peaks["kappa"], _map.data,
                                                    snr_sigma)
        self.peaks = _peaks
        if save:
            # IO.save()
            pass
Example #16
def test_moments():

	conv_map = ConvergenceMap.load("Data/unmasked.fit")
	mask_profile = Mask.load("Data/mask.fit")

	masked_map = conv_map.mask(mask_profile)

	#Compute the moments in both masked and unmasked cases
	mom_original = conv_map.moments(connected=True)
	mom_masked = masked_map.moments(connected=True)
	rel_difference = np.abs(mom_masked/mom_original - 1.0)

	#Save the values and relative differences to file
	np.savetxt("masked_moments.txt",np.array([mom_original,mom_masked,rel_difference]),fmt="%.2e")
Example #17
def test_visualize():

	conv_map = ConvergenceMap.load("Data/unmasked.fit")
	mask_profile = Mask.load("Data/mask.fit")

	masked_fraction = conv_map.mask(mask_profile,inplace=True)

	fig,ax = plt.subplots(1,2,figsize=(16,8))
	mask_profile.visualize(fig,ax[0],cmap=plt.cm.binary)
	conv_map.visualize(fig,ax[1])

	ax[0].set_title("Mask")
	ax[1].set_title("Masked map: masking fraction {0:.2f}".format(masked_fraction))

	fig.tight_layout()
	fig.savefig("mask.png")
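Example #18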
def measure_all_histograms(models,options,pool):

	#Look at a sample map
	sample_map = ConvergenceMap.fromfilename(models[0].getNames(z=1.0,realizations=[1])[0],loader=load_fits_default_convergence)
	#Initialize Gaussian shape noise generator for the sample map shape and angle
	generator = GaussianNoiseGenerator.forMap(sample_map)

	#Parsed from options
	num_realizations = options.getint("analysis","num_realizations")
	smoothing_scales = [float(scale) for scale in options.get("analysis","smoothing_scales").split(",")]
	bin_edges = np.ogrid[options.getfloat("analysis","bin_edge_low"):options.getfloat("analysis","bin_edge_high"):(options.getint("analysis","num_bins") - 2)*1j]
	bin_edges = np.hstack((-10.0,bin_edges,10.0))
	z = options.getfloat("analysis","redshift")

	bin_midpoints = 0.5*(bin_edges[1:] + bin_edges[:-1])
	

	#Create smoothing scale index for the histograms
	idx = Indexer.stack([PDF(bin_edges) for scale in smoothing_scales])

	#Build the data type of the structure array in output
	data_type = [(model.name,Ensemble) for model in models]
	#Append info about the smoothing scale
	data_type = [("Smooth",float),] + data_type #np.float was removed in NumPy 1.20

	#Create output struct array
	ensemble_array = np.zeros(len(smoothing_scales),dtype=data_type)

	#Write smoothing scale information
	ensemble_array["Smooth"] = np.array(smoothing_scales)
	
	#The for loop runs the distributed computations
	for model in models:

		#Build Ensemble instance with the maps to analyze
		map_ensemble = Ensemble.fromfilelist(range(1,num_realizations+1))
		
		#Measure the histograms and load the data in the ensemble
		map_ensemble.load(callback_loader=compute_map_histograms,pool=pool,simulation_set=model,smoothing_scales=smoothing_scales,index=idx,generator=generator,bin_edges=bin_edges,redshift=z)

		#Split the ensemble between different smoothing scales
		map_ensemble_list = map_ensemble.split(idx)

		#Add to output struct array
		ensemble_array[model.name] = np.array(map_ensemble_list)

	return ensemble_array
Example #19
def test_minkowski():

	th_minkowski = np.ogrid[-0.15:0.15:50j]

	#Set up plots
	fig,ax = plt.subplots(1,3,figsize=(24,8))

	conv_map = ConvergenceMap.load("Data/unmasked.fit")
	mask_profile = Mask.load("Data/mask.fit")

	#Compute and plot the MFs for the unmasked map
	v,V0,V1,V2 = conv_map.minkowskiFunctionals(th_minkowski)
	ax[0].plot(v,V0,label="Unmasked")
	ax[1].plot(v,V1,label="Unmasked")
	ax[2].plot(v,V2,label="Unmasked")

	#Compute and plot the MFs for the masked, zero padded mask
	v,V0,V1,V2 = (conv_map*mask_profile).minkowskiFunctionals(th_minkowski)
	ax[0].plot(v,V0,linestyle="--",label="Zero padded")
	ax[1].plot(v,V1,linestyle="--",label="Zero padded")
	ax[2].plot(v,V2,linestyle="--",label="Zero padded")

	#Compute and plot the MFs for the masked map
	masked_fraction = conv_map.mask(mask_profile,inplace=True)
	v,V0,V1,V2 = conv_map.minkowskiFunctionals(th_minkowski)
	ax[0].plot(v,V0,label="Masked {0:.1f}%".format(masked_fraction*100))
	ax[1].plot(v,V1,label="Masked {0:.1f}%".format(masked_fraction*100))
	ax[2].plot(v,V2,label="Masked {0:.1f}%".format(masked_fraction*100))

	#Labels
	ax[0].set_xlabel(r"$\kappa$")
	ax[0].set_ylabel(r"$V_0(\kappa)$")

	ax[1].set_xlabel(r"$\kappa$")
	ax[1].set_ylabel(r"$V_1(\kappa)$")

	ax[2].set_xlabel(r"$\kappa$")
	ax[2].set_ylabel(r"$V_2(\kappa)$")

	ax[0].legend(loc="upper right")

	fig.tight_layout()

	plt.savefig("masked_minkowski.png")
	plt.clf()
Example #20
    def gaussian(
        img: np.ndarray,
        theta: un.quantity.Quantity,
        theta_i: Optional[un.quantity.Quantity] = None,
        fwhm_i: Optional[un.quantity.Quantity] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Gaussian filter for low-pass filter to get rid of long-wavelenths
        (e.g. CMB when interested in ISW-RS signals).
        Note: It is based on lenstools.ConvergenceMap.smooth function,
            but it has nothing in particular todo with convergence maps,
            and can be used for any other map, e.g. isw_rs.

        Args:
            img: partial sky-map
            theta: edge-lenght of field-of-view [deg]
            theta_i: sigma of gaussian used to indicated
                smoothing kernel width, [arcmin]
            fwhm_i: Full-Width-Half-Maximum (fwhm) of gaussian used to indicated
                smoothing kernel width, [arcmin]

        Returns:
        """
        img = ConvergenceMap(data=img, angle=theta.to(un.deg))
        fwhm_i = fwhm_i.to(un.arcmin).value
        if theta_i is None and fwhm_i is None:
            raise ValueError(f"Either theta_i or fwhm_i must be set for smoothing scale.")
        elif theta_i is None:
            sigma_i = Filters.fwhm_to_sigma(fwhm_i)
        else:
            sigma_i = theta_i

        sigma_i *= un.arcmin
        if len(img.data) < 500:
            # for image with less than 500^2 images real-space is faster
            img = img.smooth(
                scale_angle=sigma_i, kind="gaussian", **kwargs
            )
        else:
            # for larger images FFT is optimal
            img = img.smooth(
                scale_angle=sigma_i, kind="gaussianFFT", **kwargs
            )
        return img.data
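
Filters.fwhm_to_sigma is not shown above; a minimal sketch of what it presumably computes, using the standard Gaussian relation FWHM = 2*sqrt(2 ln 2)*sigma (the helper's exact behavior is an assumption, not confirmed from the source):

import numpy as np

def fwhm_to_sigma(fwhm: float) -> float:
    # for a Gaussian profile, FWHM = 2*sqrt(2*ln(2))*sigma ~= 2.355*sigma (assumed helper)
    return fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))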
Example #21
def test_boundaries():

	conv_map = ConvergenceMap.load("Data/unmasked.fit")
	mask_profile = Mask.load("Data/mask.fit")

	masked_map = conv_map.mask(mask_profile)
	assert hasattr(masked_map,"_mask")
	assert masked_map._masked
	assert masked_map.side_angle == conv_map.side_angle
	assert masked_map.data.shape == conv_map.data.shape

	#Compute boundaries
	perimeter_area = masked_map.maskBoundaries()

	fig,ax = plt.subplots(1,3,figsize=(24,8))

	#Plot gradient boundary
	ax[0].imshow(masked_map._gradient_boundary,origin="lower",cmap=plt.cm.binary,interpolation="nearest",extent=[0,conv_map.side_angle.value,0,conv_map.side_angle.value])

	#Plot hessian (but not gradient) boundary
	ax[1].imshow(masked_map._gradient_boundary ^ masked_map._hessian_boundary,origin="lower",cmap=plt.cm.binary,interpolation="nearest",extent=[0,conv_map.side_angle.value,0,conv_map.side_angle.value])

	#Plot gradient and hessian boundary
	ax[2].imshow(masked_map.boundary,origin="lower",cmap=plt.cm.binary,interpolation="nearest",extent=[0,conv_map.side_angle.value,0,conv_map.side_angle.value])

	ax[0].set_xlabel(r"$x$(deg)")
	ax[0].set_ylabel(r"$y$(deg)")
	ax[0].set_title("Gradient boundary")

	ax[1].set_xlabel(r"$x$(deg)")
	ax[1].set_ylabel(r"$y$(deg)")
	ax[1].set_title("Hessian overhead")

	ax[2].set_xlabel(r"$x$(deg)")
	ax[2].set_ylabel(r"$y$(deg)")
	ax[2].set_title("Full boundary: perimeter/area={0:.3f}".format(perimeter_area))

	fig.tight_layout()

	plt.savefig("boundaries.png")
	plt.clf()
Example #22
def convergencePeaks(cmd_args,fontsize=22):

	#Plot setup
	fig,ax = plt.subplots(1,2,figsize=(16,8))

	#Load the convergence map and smooth on 0.5 arcmin
	conv = ConvergenceMap.load(os.path.join(fiducial["c0"].getMapSet("kappa").home,"WLconv_z2.00_0001r.fits"))
	conv.smooth(0.5*u.arcmin,kind="gaussianFFT",inplace=True)

	#Find the peak locations and height
	sigma = np.linspace(-2.,13.,101)
	height,positions = conv.locatePeaks(sigma,norm=True)

	#Show the convergence with the peak locations
	conv.visualize(fig=fig,ax=ax[0],colorbar=True,cbar_label=r"$\kappa$")
	ax[0].scatter(*positions[height>2.].to(u.deg).value.T,color="red",marker="o")
	ax[0].set_xlim(0,conv.side_angle.to(u.deg).value)
	ax[0].set_ylim(0,conv.side_angle.to(u.deg).value)

	#Build a gaussianized version of the map
	gen = GaussianNoiseGenerator.forMap(conv)
	ell = np.linspace(conv.lmin,conv.lmax,100)
	ell,Pell = conv.powerSpectrum(ell)
	convGauss = gen.fromConvPower(np.array([ell,Pell]),bounds_error=False,fill_value=0.)

	#Show the peak histogram (measured + gaussian)
	conv.peakHistogram(sigma,norm=True,fig=fig,ax=ax[1],label=r"${\rm Measured}$")
	convGauss.peakHistogram(sigma,norm=True,fig=fig,ax=ax[1],label=r"${\rm Gaussianized}$")
	conv.gaussianPeakHistogram(sigma,norm=True,fig=fig,ax=ax[1],label=r"${\rm Prediction}:(dN_{\rm pk}/d\nu)_G$")

	#Limits
	ax[1].set_ylim(1,1.0e3)

	#Labels
	ax[1].set_xlabel(r"$\kappa/\sigma_0$",fontsize=fontsize)
	ax[1].set_ylabel(r"$dN_{\rm pk}(\kappa)$",fontsize=fontsize)
	ax[1].legend()

	#Save
	fig.tight_layout()
	fig.savefig("{0}/convergencePeaks.{0}".format(cmd_args.type))
Example #23
def test_convergence_direct():

	z_final = 2.0

	#Start a bucket of light rays from these positions
	b = np.linspace(0.0,tracer.lens[0].side_angle.to(deg).value,512)
	xx,yy = np.meshgrid(b,b)
	pos = np.array([xx,yy]) * deg

	#Compute the convergence
	conv = tracer.convergenceDirect(pos,z=z_final)

	#Wrap into a ConvergenceMap and visualize
	conv_map = ConvergenceMap(data=conv,angle=tracer.lens[0].side_angle)
	conv_map.visualize(colorbar=True)
	conv_map.savefig("convergence_direct.png")
Example #24
    def from_file(
        cls,
        map_file: str,
        opening_angle: float,
        quantity: str,
        dir_in: str,
        npix: Optional[int] = None,
        convert_unit: bool = True,
    ) -> "SkyArray":
        """
        Initialize class by reading the skymap data from pandas hdf5 file
        or numpy array.
        The file can be pointed at via map_filename or file_dsc.

        Args:
            map_filename:
                File path with which skymap pd.DataFrame can be loaded.
            opening_angle: [deg]
        """
        assert map_file, "There is no file being pointed at"

        file_extension = map_file.split(".")[-1]
        if file_extension == "h5":
            map_df = pd.read_hdf(map_file, key="df")
            return cls.from_dataframe(
                map_df,
                opening_angle,
                quantity,
                dir_in,
                map_file,
                npix,
                convert_unit,
            )
        elif file_extension in ["npy", "fits"]:
            if file_extension == "npy":
                map_array = np.load(map_file)
            elif file_extension == "fits":
                map_array = ConvergenceMap.load(map_file, format="fits").data
            return cls.from_array(map_array, opening_angle, quantity, dir_in,
                                  map_file)
        else:
            raise ValueError(f"Unsupported file extension: {file_extension}")
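
A hedged usage sketch (paths and values hypothetical):

sky = SkyArray.from_file(
    map_file="maps/kappa_z1.fits",
    opening_angle=10.0,  # [deg]
    quantity="kappa",
    dir_in="maps/",
)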
Example #25
def main(datapath, output_file):
    ell = np.logspace(np.log10(500), np.log10(5000), 38)  # multipole bin edges
    data = pd.DataFrame(columns=cosmology + ell_bins)
    data.to_csv(output_file)  # save the column header to file
    # make a single row to receive data (DataFrame.append is deprecated in pandas >= 1.4)
    data = pd.DataFrame(
        np.zeros(len(cosmology) + len(ell_bins)).reshape(1, -1),
        columns=cosmology + ell_bins)
    for _, dirs, _ in os.walk(datapath):
        for d in dirs:
            for root, _, files in os.walk(os.path.join(datapath, d)):
                for conv_map_fit in files:
                    try:
                        conv_map = ConvergenceMap.load(os.path.join(root, conv_map_fit))
                        header = fits.open(os.path.join(root, conv_map_fit))[0].header
                        for c_param in cosmology:
                            data[c_param] = header[c_param]
                        l, power_spectrum = conv_map.powerSpectrum(ell)
                        data[ell_bins] = power_spectrum
                        data.to_csv(output_file, mode="a", header=False)
                    except Exception:
                        # skip files that cannot be read as convergence maps
                        continue
Example #26
def test_pdf():

	th_pdf = np.ogrid[-0.15:0.15:50j]

	conv_map = ConvergenceMap.load("Data/unmasked.fit")
	mask_profile = Mask.load("Data/mask.fit")

	masked_map = conv_map.mask(mask_profile)

	v,p_original = conv_map.pdf(th_pdf)
	v,p_masked = masked_map.pdf(th_pdf)

	#Plot the two histograms
	plt.plot(v,p_original,label="Unmasked")
	plt.plot(v,p_masked,label="Masked {0:.1f}%".format(mask_profile.maskedFraction * 100))

	#Labels
	plt.xlabel(r"$\kappa$")
	plt.ylabel(r"$P(\kappa)$")
	plt.legend(loc="upper right")

	plt.savefig("masked_pdf.png")
	plt.clf()
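Example #27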
def compute_map_histograms(args):

	assert "map_id" in args.keys()
	assert "simulation_set" in args.keys()
	assert "smoothing_scales" in args.keys()
	assert "redshift" in args.keys()
	assert "index" in args.keys()
	assert "generator" in args.keys()
	assert "bin_edges" in args.keys()

	assert len(args["index"].descriptor_list) == len(args["smoothing_scales"])

	z = args["redshift"]

	#Get map name to analyze
	map_name = args["simulation_set"].getNames(z=z,realizations=[args["map_id"]])[0]

	#Load the convergence map
	convergence_map = ConvergenceMap.fromfilename(map_name,loader=load_fits_default_convergence)

	#Generate the shape noise map
	noise_map = args["generator"].getShapeNoise(z=z,ngal=15.0*arcmin**-2,seed=args["map_id"])

	#Add the noise
	convergence_map += noise_map

	#Measure the features
	hist_output = np.zeros(args["index"].size)
	for n,descriptor in enumerate(args["index"].descriptor_list):

		logging.debug("Processing {0} x {1} arcmin".format(map_name,args["smoothing_scales"][n]))

		smoothed_map = convergence_map.smooth(args["smoothing_scales"][n])
		v,hist_output[descriptor.first:descriptor.last] = smoothed_map.pdf(args["bin_edges"])

	#Return the histograms in array format
	return hist_output
Example #28
def convergence_visualize(cmd_args):

    #Plot setup
    fig, ax = plt.subplots()

    #Load the convergence map and smooth on 1 arcmin
    conv = ConvergenceMap.load(os.path.join(dataExtern(), "conv1.fit"))
    conv.smooth(1.0 * u.arcmin, kind="gaussianFFT", inplace=True)

    #Find the peak locations and height
    sigma_peaks = np.linspace(-2., 11., 101)
    height, positions = conv.locatePeaks(sigma_peaks, norm=True)

    #Show the map and the peaks on it
    conv.visualize(fig=fig, ax=ax, colorbar=True, cbar_label=r"$\kappa$")
    ax.scatter(*positions[height > 2.].to(u.deg).value.T,
               color="black",
               marker="x")
    ax.set_xlim(0, conv.side_angle.to(u.deg).value)
    ax.set_ylim(0, conv.side_angle.to(u.deg).value)

    #Save the figure
    fig.tight_layout()
    fig.savefig("convergence_visualize." + cmd_args.type)
Example #29
def test_peak_locations():

	th_peaks = np.arange(0.24,0.5,0.01)

	conv_map = ConvergenceMap.load("Data/unmasked.fit")
	mask_profile = Mask.load("Data/mask.fit")

	masked_map = conv_map.mask(mask_profile)

	#Locate the peaks on the map
	values,location = masked_map.locatePeaks(th_peaks)

	#Visualize the map and the peak locations
	fig,ax = plt.subplots(1,2,figsize=(16,8))
	masked_map.visualize(fig=fig,ax=ax[0],colorbar=True)
	masked_map.visualize(fig=fig,ax=ax[1])

	ax[1].scatter(location[:,0].value,location[:,1].value,color="black")
	ax[1].set_xlim(0.0,masked_map.side_angle.value)
	ax[1].set_ylim(0.0,masked_map.side_angle.value)

	#Save the figure
	fig.tight_layout()
	fig.savefig("masked_peak_locations.png")
Example #30
def test_peaks():

	th_peaks = np.ogrid[-0.04:0.12:50j]

	conv_map = ConvergenceMap.load("Data/unmasked.fit")
	mask_profile = Mask.load("Data/mask.fit")

	masked_map = conv_map.mask(mask_profile)

	v,pk_orig = conv_map.peakCount(th_peaks)
	v,pk_masked = masked_map.peakCount(th_peaks)

	#Plot the difference
	plt.plot(v,pk_orig,label=r"Unmasked: $N_p=${0}".format(int(integrate.simps(pk_orig,x=v))))
	plt.plot(v,pk_masked,label=r"With {0:.1f}% area masking: $N_p=${1}".format(mask_profile.maskedFraction * 100,int(integrate.simps(pk_masked,x=v))))
	plt.plot(v,pk_masked/(1.0 - mask_profile.maskedFraction),label="Re-scaled")

	#Labels
	plt.xlabel(r"$\kappa$")
	plt.ylabel(r"$dN/d\kappa$")
	plt.legend(loc="upper left")

	plt.savefig("masked_peaks.png")
	plt.clf()
Example #31
	root_path = cmd_args.path
	num_realizations = cmd_args.num_realizations
	
	#Smoothing scales in arcmin
	smoothing_scales = [ theta*arcmin for theta in [0.1,0.5,1.0,2.0] ]
	bin_edges = np.ogrid[-0.15:0.15:128j]
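	#the imaginary step 128j makes np.ogrid return 128 equally spaced points, like np.linspace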
	bin_midpoints = 0.5*(bin_edges[1:] + bin_edges[:-1])
	
	#Create smoothing scale index for the histogram
	idx = Indexer.stack([PDF(bin_edges) for scale in smoothing_scales])
	
	#Create IGS1 simulation set object to look for the right simulations
	simulation_set = IGS1(root_path=root_path)
	
	#Look at a sample map
	sample_map = ConvergenceMap.load(simulation_set.getNames(z=1.0,realizations=[1])[0])
	
	#Initialize Gaussian shape noise generator
	generator = GaussianNoiseGenerator.forMap(sample_map)
	
	#Build Ensemble instance with the maps to analyze
	map_ensemble = Ensemble.fromfilelist(range(1,num_realizations+1))
	
	#Measure the histograms and load the data in the ensemble
	map_ensemble.load(callback_loader=compute_histograms,pool=pool,simulation_set=simulation_set,smoothing_scales=smoothing_scales,index=idx,generator=generator,bin_edges=bin_edges)
	
	if pool is not None:
		pool.close()

	##########################################################################################################################################
	###############################Ensemble data available at this point for covariance, PCA, etc...##########################################
Example #32
def measure_power_spectrum(filename,l_edges):

	conv_map = ConvergenceMap.load(filename)
	l,Pl = conv_map.powerSpectrum(l_edges)
	return Pl
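
A short usage sketch (the file name is hypothetical):

import numpy as np
l_edges = np.arange(200.0, 50000.0, 200.0)  # multipole bin edges
Pl = measure_power_spectrum("Data/conv1.fit", l_edges)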
Example #33
try:

    from lenstools import ConvergenceMap

except ImportError:

    import sys
    sys.path.append("..")
    from lenstools import ConvergenceMap

import numpy as np
from astropy.units import deg, rad

import matplotlib.pyplot as plt

test_map = ConvergenceMap.load("Data/conv.fit")

#Set bin edges
l_edges = np.arange(200.0, 50000.0, 200.0)
thresholds_mf = np.arange(-2.0, 2.0, 0.2)
thresholds_pk = np.arange(-1.0, 5.0, 0.2)


def test_visualize():

    assert test_map.data.dtype == np.float64  # np.float was removed in NumPy 1.20

    test_map.setAngularUnits(deg)
    test_map.visualize()
    test_map.savefig("map.png")
    test_map.setAngularUnits(deg)
Example #34
	
try:

	from lenstools import ConvergenceMap

except ImportError:
	
	import sys
	sys.path.append("..")
	from lenstools import ConvergenceMap

import numpy as np
from astropy.units import deg,rad

import matplotlib.pyplot as plt


test_map = ConvergenceMap.load("Data/conv.fit")

#Set bin edges
l_edges = np.arange(200.0,50000.0,200.0)
thresholds_mf = np.arange(-2.0,2.0,0.2)
thresholds_pk = np.arange(-1.0,5.0,0.2)

def test_visualize():

	assert test_map.data.dtype == np.float64 #np.float was removed in NumPy 1.20

	test_map.setAngularUnits(deg)
	test_map.visualize()
	test_map.savefig("map.png")
	test_map.setAngularUnits(deg)
Example #35
def map_stats(cosmo_tomo_cone):
    '''For a FITS file fn, generate power spectrum, peaks, minima, PDF and MFs.
    fn: input file name, including full path
    tomo=1, 2,..5: int, for tomographic bins
    cone=1, 2,..5: int, for light cones'''

    if len(cosmo_tomo_cone) == 3:
        cosmo, tomo, cone = cosmo_tomo_cone
        ipz = ''
    else:
        cosmo, tomo, cone, ipz = cosmo_tomo_cone

    ##################################
    ### generate a random seed, such that it is the same for all cosmologies
    ### but different for each tomo and cone
    ##################################

    if cosmo == 'cov':
        iseed = int(10000 + cone * 10 + tomo)
        out_dir = dir_cov
        fn = cov_fn_gen(tomo, cone)

    elif cosmo == 'bias':
        iseed = int(20000 + cone * 10 + tomo)  #20000
        out_dir = dir_bias
        fn = bias_fn_gen(tomo, ipz, cone)

    else:  ## all cosmologies
        if cosmo[-1] == 'a':
            ## cone goes from 1 to 25, so iseed ranges from 10 to 250
            iseed = int(cone * 10 + tomo)
        elif cosmo[-1] == 'f':
            ## 'f' starts with a different seed from the 'a' cosmology
            iseed = int(1000 + cone * 10 + tomo)
        out_dir = dir_cosmos
        fn = cosmo_fn_gen(cosmo, tomo, cone)

    print(fn)

    ##################################
    #### check if the map and computed stats files are there
    ##################################

    ############ check fits file exist
    if not os.path.isfile(fn):
        print(fn, 'fits file does not exist \n')
        return 0

    # include ipz so the existence check matches the file names saved below
    out_fn_arr = [
        out_dir + cosmo + '%s_tomo%i_cone%s_s%i.npy' % (ipz, tomo, cone, theta_g)
        for theta_g in theta_g_arr
    ]

    ############# check if stats files exist; if yes, skip computation
    if np.prod(array([os.path.isfile(out_fn) for out_fn in out_fn_arr])):
        ### check if the product of boolean elements in the array = 1 (meaning for all smoothing scales)
        print(fn, 'stats files exist; skip computation.\n')
        return 0  ### all files already exist, no need to process

    ##################################
    ########## map operations
    ##################################

    imap = fits.open(fn)[0].data  ## open the file

    ### add noise
    seed(iseed)
    noise_map = np.random.normal(loc=0.0,
                                 scale=sigma_pix_arr[tomo - 1],
                                 size=(map_pix, map_pix))
    kappa_map = ConvergenceMap(data=imap + noise_map, angle=map_side_deg)
    noise_map = 0  ## release the memory

    ### compute stats: one output file per smoothing scale
    ## 11 rows: ell, ps_noiseless, ps_unsmoothed, ps, kappa, peaks,
    ## minima, pdf, V0, V1, V2
    ps_noiseless = ConvergenceMap(data=imap,
                                  angle=map_side_deg).powerSpectrum(l_edges)
    ps_unsmoothed = kappa_map.powerSpectrum(
        l_edges)  ## power spectrum should be computed on unsmoothed maps

    s = 0
    for theta_g in theta_g_arr:
        out_fn = out_dir + cosmo + '%s_tomo%i_cone%s_s%i.npy' % (ipz, tomo,
                                                                 cone, theta_g)
        imap = kappa_map.smooth(theta_g * u.arcmin)
        out = zeros(shape=(11, Nbin))
        kappa_bins = kappa_bin_edges[s][tomo - 1]
        ps = imap.powerSpectrum(l_edges)
        peak = imap.peakCount(kappa_bins)
        minima = ConvergenceMap(data=-imap.data,
                                angle=map_side_deg).peakCount(kappa_bins)
        pdf = imap.pdf(kappa_bins)
        mfs = imap.minkowskiFunctionals(kappa_bins)
        out[0] = ps[0]
        out[1] = ps_noiseless[1]
        out[2] = ps_unsmoothed[1]
        out[3] = ps[1]
        out[4] = peak[0]
        out[5] = peak[1]
        out[6] = minima[1][::-1]
        out[7] = pdf[1]
        out[8] = mfs[1]
        out[9] = mfs[2]
        out[10] = mfs[3]
        save(out_fn, out)  ### save the file
        s += 1
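Example #36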
def cfht_convergence_measure_all(filename,index,mask_filename,mean_subtract=False):

	"""
	Measures all the statistical descriptors of a convergence map as indicated by the index instance
	
	"""

	logging.debug("Processing {0}".format(filename))

	#Load the map
	conv_map = ConvergenceMap.load(filename,format=cfht_fits_loader)

	if mask_filename is not None:
		
		#Load the mask
		mask_profile = ConvergenceMap.load(mask_filename,format=cfht_fits_loader)
		logging.debug("Loading mask from {0}".format(mask_filename))
		#Mask the map
		masked_conv_map = conv_map.mask(mask_profile)

	if mean_subtract:
		
		if mask_filename is not None:
			masked_conv_map.data -= masked_conv_map.mean()
		else:
			conv_map.data -= conv_map.mean()

	#Allocate memory for observables
	descriptors = index
	observables = np.zeros(descriptors.size)

	#Measure descriptors as directed by input
	for n in range(descriptors.num_descriptors):

		
		if type(descriptors[n]) == PowerSpectrum:
			
			if mask_filename is None:
				l,observables[descriptors[n].first:descriptors[n].last] = conv_map.powerSpectrum(descriptors[n].l_edges)
			else:
				l,observables[descriptors[n].first:descriptors[n].last] = (conv_map*mask_profile).powerSpectrum(descriptors[n].l_edges)

		elif type(descriptors[n]) == Moments:

			if mask_filename is None:
				observables[descriptors[n].first:descriptors[n].last] = conv_map.moments(connected=descriptors[n].connected)
			else:
				observables[descriptors[n].first:descriptors[n].last] = masked_conv_map.moments(connected=descriptors[n].connected)
		
		elif type(descriptors[n]) == Peaks:
			
			if mask_filename is None:
				v,observables[descriptors[n].first:descriptors[n].last] = conv_map.peakCount(descriptors[n].thresholds,norm=descriptors[n].norm)
			else:
				v,observables[descriptors[n].first:descriptors[n].last] = masked_conv_map.peakCount(descriptors[n].thresholds,norm=descriptors[n].norm)

		elif type(descriptors[n]) == PDF:

			if mask_filename is None:
				v,observables[descriptors[n].first:descriptors[n].last] = conv_map.pdf(descriptors[n].thresholds,norm=descriptors[n].norm)
			else:
				v,observables[descriptors[n].first:descriptors[n].last] = masked_conv_map.pdf(descriptors[n].thresholds,norm=descriptors[n].norm)
		
		elif type(descriptors[n]) == MinkowskiAll:
			
			if mask_filename is None:
				v,V0,V1,V2 = conv_map.minkowskiFunctionals(descriptors[n].thresholds,norm=descriptors[n].norm)
			else:
				v,V0,V1,V2 = masked_conv_map.minkowskiFunctionals(descriptors[n].thresholds,norm=descriptors[n].norm)
			
			observables[descriptors[n].first:descriptors[n].last] = np.hstack((V0,V1,V2))
		
		elif type(descriptors[n]) == MinkowskiSingle:
			
			raise ValueError("Due to computational performance you have to measure all Minkowski functionals at once!")
		
		else:
			
			raise ValueError("Measurement of this descriptor not implemented!!!")

	#Return
	return observables
Example #37
    root_path = cmd_args.path
    num_realizations = cmd_args.num_realizations

    #Smoothing scales in arcmin
    smoothing_scales = [theta * arcmin for theta in [0.1, 0.5, 1.0, 2.0]]
    bin_edges = np.ogrid[-0.15:0.15:128j]
    bin_midpoints = 0.5 * (bin_edges[1:] + bin_edges[:-1])

    #Create smoothing scale index for the histogram
    idx = Indexer.stack([PDF(bin_edges) for scale in smoothing_scales])

    #Create IGS1 simulation set object to look for the right simulations
    simulation_set = IGS1(root_path=root_path)

    #Look at a sample map
    sample_map = ConvergenceMap.load(
        simulation_set.getNames(z=1.0, realizations=[1])[0])

    #Initialize Gaussian shape noise generator
    generator = GaussianNoiseGenerator.forMap(sample_map)

    #Build Ensemble instance with the maps to analyze
    map_ensemble = Ensemble.fromfilelist(range(1, num_realizations + 1))

    #Measure the histograms and load the data in the ensemble
    map_ensemble.load(callback_loader=compute_histograms,
                      pool=pool,
                      simulation_set=simulation_set,
                      smoothing_scales=smoothing_scales,
                      index=idx,
                      generator=generator,
                      bin_edges=bin_edges)
Example #38
def test_ray_simple():

	z_final = 2.0

	start = time.time()
	last_timestamp = start

	#Start a bucket of light rays from these positions
	b = np.linspace(0.0,tracer.lens[0].side_angle.to(deg).value,512)
	xx,yy = np.meshgrid(b,b)
	pos = np.array([xx,yy]) * deg

	#Trace the rays
	fin = tracer.shoot(pos,z=z_final)

	now = time.time()
	logging.info("Ray tracing completed in {0:.3f}s".format(now-last_timestamp))
	last_timestamp = now

	#Build the deflection plane
	dfl = DeflectionPlane(fin.value-pos.value,angle=tracer.lens[0].side_angle,redshift=tracer.redshift[-1],cosmology=tracer.lens[0].cosmology,unit=pos.unit)

	#Compute shear and convergence
	conv = dfl.convergence()
	shear = dfl.shear()
	omega = dfl.omega()

	now = time.time()
	logging.info("Weak lensing calculations completed in {0:.3f}s".format(now-last_timestamp))
	last_timestamp = now

	#Finally visualize the result
	conv.visualize(colorbar=True)
	conv.savefig("raytraced_convergence.png")
	omega.visualize(colorbar=True)
	omega.savefig("raytraced_omega.png")
	shear.visualize(colorbar=True)
	shear.savefig("raytraced_shear.png")

	#We want to plot the power spectrum of the raytraced maps
	fig,ax = plt.subplots()
	l_edges = np.arange(200.0,10000.0,100.0)
	l,Pl = conv.powerSpectrum(l_edges)
	ax.plot(l,l*(l+1)*Pl/(2.0*np.pi),label="From ray positions")

	#And why not, E and B modes too
	figEB,axEB = plt.subplots()
	l,EEl,BBl,EBl = shear.decompose(l_edges)
	axEB.plot(l,l*(l+1)*EEl/(2.0*np.pi),label="EE From ray positions",color="black")
	axEB.plot(l,l*(l+1)*BBl/(2.0*np.pi),label="BB From ray positions",color="green")
	axEB.plot(l,l*(l+1)*np.abs(EBl)/(2.0*np.pi),label="EB From ray positions",color="blue")

	#Now compute the shear and convergence raytracing the actual jacobians (more expensive computationally cause it computes the jacobian at every step)
	finJ = tracer.shoot(pos,z=z_final,kind="jacobians")
	conv = ConvergenceMap(data=1.0-0.5*(finJ[0]+finJ[3]),angle=conv.side_angle)
	shear = ShearMap(data=np.array([0.5*(finJ[3]-finJ[0]),-0.5*(finJ[1]+finJ[2])]),angle=shear.side_angle)

	now = time.time()
	logging.info("Jacobian ray tracing completed in {0:.3f}s".format(now-last_timestamp))
	last_timestamp = now

	#Finally visualize the result
	conv.visualize(colorbar=True)
	conv.savefig("raytraced_convergence_jacobian.png")
	shear.visualize(colorbar=True)
	shear.savefig("raytraced_shear_jacobian.png")

	#We want to plot the power spectrum of the raytraced maps
	l,Pl = conv.powerSpectrum(l_edges)
	ax.plot(l,l*(l+1)*Pl/(2.0*np.pi),label="From Jacobians")
	ax.set_xlabel(r"$l$")
	ax.set_ylabel(r"$l(l+1)P_l/2\pi$")
	ax.set_xscale("log")
	ax.set_yscale("log")
	ax.legend()
	fig.savefig("raytracing_conv_power.png")

	#And why not, E and B modes too
	l,EEl,BBl,EBl = shear.decompose(l_edges) #re-decompose: otherwise the curves below would repeat the ray-position spectra
	axEB.plot(l,l*(l+1)*EEl/(2.0*np.pi),label="EE From jacobians",color="black",linestyle="--")
	axEB.plot(l,l*(l+1)*BBl/(2.0*np.pi),label="BB From jacobians",color="green",linestyle="--")
	axEB.plot(l,l*(l+1)*np.abs(EBl)/(2.0*np.pi),label="EB From jacobians",color="blue",linestyle="--")
	axEB.set_xlabel(r"$l$")
	axEB.set_ylabel(r"$l(l+1)P_l/2\pi$")
	axEB.set_xscale("log")
	axEB.set_yscale("log")
	axEB.legend(loc="lower right",prop={"size":10})
	figEB.savefig("raytracing_shear_power.png")

	now = time.time()
	logging.info("Total runtime {0:.3f}s".format(now-start))
Example #39
 def power_spectrum(self, im):
     """Calculate power spectrum."""
     conv_map = ConvergenceMap(im, angle=u.degree * 3.5)
     l, Pl = conv_map.powerSpectrum(self.bins)
     return Pl
Example #40
def singleRedshift(pool,batch,settings,id):

	#Safety check
	assert isinstance(pool,MPIWhirlPool) or (pool is None)
	assert isinstance(batch,SimulationBatch)

	parts = id.split("|")

	if len(parts)==2:

		assert isinstance(settings,MapSettings)
	
		#Separate the id into cosmo_id and geometry_id
		cosmo_id,geometry_id = parts

		#Get a handle on the model
		model = batch.getModel(cosmo_id)

		#Get the corresponding simulation collection and map batch handlers
		collection = [model.getCollection(geometry_id)]
		map_batch = collection[0].getMapSet(settings.directory_name)
		cut_redshifts = np.array([0.0])

	elif len(parts)==1:

		assert isinstance(settings,TelescopicMapSettings)

		#Get a handle on the model
		model = batch.getModel(parts[0])

		#Get the corresponding simulation collection and map batch handlers
		map_batch = model.getTelescopicMapSet(settings.directory_name)
		collection = map_batch.mapcollections
		cut_redshifts = map_batch.redshifts

	else:
		
		if (pool is None) or (pool.is_master()):
			logdriver.error("Format error in {0}: too many '|'".format(id))
		sys.exit(1)


	#Override the settings with the previously pickled ones, if prompted by user
	if settings.override_with_local:

		local_settings_file = os.path.join(map_batch.home_subdir,"settings.p")
		settings = MapSettings.read(local_settings_file)
		assert isinstance(settings,MapSettings)

		if (pool is None) or (pool.is_master()):
			logdriver.warning("Overriding settings with the previously pickled ones at {0}".format(local_settings_file))

	##################################################################
	##################Settings read###################################
	##################################################################

	#Set random seed to generate the realizations
	if pool is not None:
		np.random.seed(settings.seed + pool.rank)
	else:
		np.random.seed(settings.seed)

	#Read map angle,redshift and resolution from the settings
	map_angle = settings.map_angle
	source_redshift = settings.source_redshift
	resolution = settings.map_resolution

	if len(parts)==2:

		#########################
		#Use a single collection#
		#########################

		#Read the plane set we should use
		plane_set = (settings.plane_set,)

		#Randomization
		nbody_realizations = (settings.mix_nbody_realizations,)
		cut_points = (settings.mix_cut_points,)
		normals = (settings.mix_normals,)
		map_realizations = settings.lens_map_realizations

	elif len(parts)==1:

		#######################
		#####Telescopic########
		#######################

		#Check that we have enough info
		for attr_name in ["plane_set","mix_nbody_realizations","mix_cut_points","mix_normals"]:
			if len(getattr(settings,attr_name))!=len(collection):
				if (pool is None) or (pool.is_master()):
					logdriver.error("You need to specify a setting {0} for each collection!".format(attr_name))
				sys.exit(1)

		#Read the plane set we should use
		plane_set = settings.plane_set

		#Randomization
		nbody_realizations = settings.mix_nbody_realizations
		cut_points = settings.mix_cut_points
		normals = settings.mix_normals
		map_realizations = settings.lens_map_realizations



	#Decide which map realizations this MPI task will take care of (if pool is None, all of them)
	try:
		realization_offset = settings.first_realization - 1
	except AttributeError:
		realization_offset = 0

	if pool is None:
		first_map_realization = 0 + realization_offset
		last_map_realization = map_realizations + realization_offset
		realizations_per_task = map_realizations
		logdriver.debug("Generating lensing map realizations from {0} to {1}".format(first_map_realization+1,last_map_realization))
	else:
		assert map_realizations%(pool.size+1)==0,"Perfect load-balancing enforced, map_realizations must be a multiple of the number of MPI tasks!"
		realizations_per_task = map_realizations//(pool.size+1)
		first_map_realization = realizations_per_task*pool.rank + realization_offset
		last_map_realization = realizations_per_task*(pool.rank+1) + realization_offset
		logdriver.debug("Task {0} will generate lensing map realizations from {1} to {2}".format(pool.rank,first_map_realization+1,last_map_realization))

	#Planes will be read from this path
	plane_path = os.path.join("{0}","ic{1}","{2}")

	if (pool is None) or (pool.is_master()):
		for c,coll in enumerate(collection):
			logdriver.info("Reading planes from {0}".format(plane_path.format(coll.storage_subdir,"-".join([str(n) for n in nbody_realizations[c]]),plane_set[c])))

	#Plane info file is the same for all collections
	if (not hasattr(settings,"plane_info_file")) or (settings.plane_info_file is None):
		info_filename = batch.syshandler.map(os.path.join(plane_path.format(collection[0].storage_subdir,nbody_realizations[0][0],plane_set[0]),"info.txt"))
	else:
		info_filename = settings.plane_info_file

	if (pool is None) or (pool.is_master()):
		logdriver.info("Reading lens plane summary information from {0}".format(info_filename))

	#Read how many snapshots are available
	with open(info_filename,"r") as infofile:
		num_snapshots = len(infofile.readlines())

	#Save path for the maps
	save_path = map_batch.storage_subdir

	if (pool is None) or (pool.is_master()):
		logdriver.info("Lensing maps will be saved to {0}".format(save_path))

	begin = time.time()

	#Log initial memory load
	peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
	if (pool is None) or (pool.is_master()):
		logstderr.info("Initial memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))

	#We need one of these for cycles for each map random realization
	for rloc,r in enumerate(range(first_map_realization,last_map_realization)):

		#Instantiate the RayTracer
		tracer = RayTracer()

		#Force garbage collection
		gc.collect()

		#Start timestep
		start = time.time()
		last_timestamp = start

		#############################################################
		###############Add the lenses to the system##################
		#############################################################

		#Open the info file to read the lens specifications (assume the info file is the same for all nbody realizations)
		infofile = open(info_filename,"r")

		#Read the info file line by line, and decide if we should add the particular lens corresponding to that line or not
		for s in range(num_snapshots):

			#Read the line
			line = infofile.readline().strip("\n")

			#Stop if there is nothing more to read
			if line=="":
				break

			#Split the line in snapshot,distance,redshift
			line = line.split(",")

			snapshot_number = int(line[0].split("=")[1])
		
			distance,unit = line[1].split("=")[1].split(" ")
			if unit=="Mpc/h":
				distance = float(distance)*model.Mpc_over_h
			else:
				distance = float(distance)*getattr(u,unit)

			lens_redshift = float(line[2].split("=")[1])

			#Select the right collection
			for n,z in enumerate(cut_redshifts):
				if lens_redshift>=z:
					c = n

			#Randomization of planes
			nbody = np.random.randint(low=0,high=len(nbody_realizations[c]))
			cut = np.random.randint(low=0,high=len(cut_points[c]))
			normal = np.random.randint(low=0,high=len(normals[c]))

			#Log to user
			logdriver.debug("Realization,snapshot=({0},{1}) --> NbodyIC,cut_point,normal=({2},{3},{4})".format(r,s,nbody_realizations[c][nbody],cut_points[c][cut],normals[c][normal]))

			#Add the lens to the system
			logdriver.info("Adding lens at redshift {0}".format(lens_redshift))
			plane_name = batch.syshandler.map(os.path.join(plane_path.format(collection[c].storage_subdir,nbody_realizations[c][nbody],plane_set[c]),settings.plane_name_format.format(snapshot_number,cut_points[c][cut],normals[c][normal],settings.plane_format)))
			tracer.addLens((plane_name,distance,lens_redshift))

		#Close the infofile
		infofile.close()

		now = time.time()
		logdriver.info("Plane specification reading completed in {0:.3f}s".format(now-start))
		last_timestamp = now

		#Rearrange the lenses according to redshift and roll them randomly along the axes
		tracer.reorderLenses()

		now = time.time()
		logdriver.info("Reordering completed in {0:.3f}s".format(now-last_timestamp))
		last_timestamp = now

		#Start a bucket of light rays from a regular grid of initial positions
		b = np.linspace(0.0,map_angle.value,resolution)
		xx,yy = np.meshgrid(b,b)
		pos = np.array([xx,yy]) * map_angle.unit

		#Trace the ray deflections
		jacobian = tracer.shoot(pos,z=source_redshift,kind="jacobians")

		now = time.time()
		logdriver.info("Jacobian ray tracing for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
		last_timestamp = now

		#Compute shear,convergence and omega from the jacobians
		if settings.convergence:
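			#convergence from the lensing Jacobian: kappa = 1 - (A11 + A22)/2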
		
			convMap = ConvergenceMap(data=1.0-0.5*(jacobian[0]+jacobian[3]),angle=map_angle)
			savename = batch.syshandler.map(os.path.join(save_path,"WLconv_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
			logdriver.info("Saving convergence map to {0}".format(savename)) 
			convMap.save(savename)
			logdriver.debug("Saved convergence map to {0}".format(savename)) 

		##############################################################################################################################
	
		if settings.shear:
		
			shearMap = ShearMap(data=np.array([0.5*(jacobian[3]-jacobian[0]),-0.5*(jacobian[1]+jacobian[2])]),angle=map_angle)
			savename = batch.syshandler.map(os.path.join(save_path,"WLshear_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
			logdriver.info("Saving shear map to {0}".format(savename))
			shearMap.save(savename) 

		##############################################################################################################################
	
		if settings.omega:
		
			omegaMap = Spin0(data=-0.5*(jacobian[2]-jacobian[1]),angle=map_angle)
			savename = batch.syshandler.map(os.path.join(save_path,"WLomega_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
			logdriver.info("Saving omega map to {0}".format(savename))
			omegaMap.save(savename)

		now = time.time()
		
		#Log peak memory usage to stdout
		peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
		logdriver.info("Weak lensing calculations for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
		logdriver.info("Peak memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))

		#Log progress and peak memory usage to stderr
		if (pool is None) or (pool.is_master()):
			logstderr.info("Progress: {0:.2f}%, peak memory usage: {1:.3f} (task), {2[0]:.3f} (all {2[1]} tasks)".format(100*(rloc+1.)/realizations_per_task,peak_memory_task,peak_memory_all))
	
	#Safety sync barrier
	if pool is not None:
		pool.comm.Barrier()

	if (pool is None) or (pool.is_master()):	
		now = time.time()
		logdriver.info("Total runtime {0:.3f}s".format(now-begin))
Example #41
#!python
# Jia Liu 2015/05/24
# This code reads in AHF maps and Gadget maps
# and return the peaks/ps for each set

#import WLanalysis
import numpy as np
from scipy import *
import sys, glob, os
#sys.modules["mpi4py"] = None
from emcee.utils import MPIPool
#from lenstools import Ensemble
from lenstools import ConvergenceMap 
from lenstools.defaults import load_fits_default_convergence

kmin = -0.08
kmax = 0.12
thresholds = linspace(kmin, kmax, 26)

peaksGen = lambda fn: ConvergenceMap.load(fn).peakCount(thresholds)

home = '/work/02977/jialiu/lenstools_home/'

storage = '/scratch/02977/jialiu/lenstools_storage/'

peaks_fn_amiga = os.path.join(home,'Om0.300_Ol0.700/512b240/peaks_amiga.npy')

peaks_fn_gadget = os.path.join(home,'Om0.300_Ol0.700/512b240/peaks_gadget.npy')