Example #1
residual_map_path = result_path + "/image/fits/fit_residual_map.fits"
chi_squared_map_path = result_path + "/image/fits/fit_chi_squared_map.fits"

# Now, let's load the image as a hyper array. A hyper array is an ordinary NumPy array which also carries a pixel
# scale, allowing us to convert the axes of the array to arc-second coordinates.
image = al.array.from_fits(file_path=image_path, hdu=0, pixel_scales=0.03)
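
# As a quick check of the arc-second conversion described above, we can multiply the pixel counts by the 0.03"
# pixel scale used above (a sketch; 'shape_2d' is the attribute used elsewhere in these examples):
print("y extent (arcsec): ", image.shape_2d[0] * 0.03)
print("x extent (arcsec): ", image.shape_2d[1] * 0.03)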

# Load the residual map as a hyper array in the same way.
residual_map = al.array.from_fits(file_path=residual_map_path, hdu=0, pixel_scales=0.03)

# We can now use an array plotter to plot the residual map.

plotter = aplt.Plotter(labels=aplt.Labels(title="SLACS1430+4105 Residual Map"))

aplt.array(array=residual_map, plotter=plotter)

# A useful way to really dig into the residuals is to set upper and lower limits on the normalization of the colorbar.

plotter = aplt.Plotter(
    labels=aplt.Labels(title="SLACS1430+4105 Residual Map"),
    cmap=aplt.ColorMap(norm_min=-0.02, norm_max=0.02),
)

aplt.array(array=residual_map, plotter=plotter)

# Alternatively, we can use a symmetric logarithmic colormap. 'linthresh' sets the range around zero within which
# the colormap is linear, and 'linscale' sets how much of the colormap that linear range occupies.

plotter = aplt.Plotter(
    labels=aplt.Labels(title="SLACS1430+4105 Residual Map"),
    cmap=aplt.ColorMap(norm="symmetric_log", linthresh=0.02, linscale=0.02),
Example #2
# NOTE: the opening of this snippet is truncated; the profile class, centre and axis_ratio below are assumed
# reconstructions of the mass profile whose deflection angles we will interpolate.
mass_profile = al.mp.EllipticalIsothermal(centre=(0.0, 0.0),
                                          axis_ratio=0.8,
                                          phi=45.0,
                                          einstein_radius=0.5)

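# 'interpolator' is assumed to have been constructed earlier from the masked grid (that setup is not shown in this
# snippet). We evaluate the deflection angles on its coarse interpolation grid, then interpolate them onto every
# (y,x) point of the full masked grid.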
interp_deflections = mass_profile.deflections_from_grid(
    grid=interpolator.interp_grid)
deflections = np.zeros((masked_imaging.grid.shape[0], 2))
deflections[:, 0] = interpolator.interpolated_values_from_values(
    values=interp_deflections[:, 0])
deflections[:, 1] = interpolator.interpolated_values_from_values(
    values=interp_deflections[:, 1])

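# For comparison, evaluate the deflection angles directly on every point of the full grid.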
true_deflections = mass_profile.deflections_from_grid(grid=masked_imaging.grid)

difference_y = deflections[:, 0] - true_deflections[:, 0]
difference_x = deflections[:, 1] - true_deflections[:, 1]

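# Summarize the interpolation errors: the mean gives the bias, the standard deviation the scatter and the max the
# worst-case error of the interpolated deflection angles.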
print("interpolation y error: ", np.mean(difference_y))
print("interpolation y uncertainty: ", np.std(difference_y))
print("interpolation y max error: ", np.max(difference_y))
print("interpolation x error: ", np.mean(difference_x))
print("interpolation x uncertainty: ", np.std(difference_x))
print("interpolation x max error: ", np.max(difference_x))

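# Map the 1D differences back onto the masked 2D grid, so they can be plotted as residual maps.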
difference_y_2d = masked_imaging.grid.array_stored_1d_from_sub_array_1d(
    sub_array_1d=difference_y)
difference_x_2d = masked_imaging.grid.array_stored_1d_from_sub_array_1d(
    sub_array_1d=difference_x)

aplt.array(array=difference_y_2d)
aplt.array(array=difference_x_2d)
Example #3

# Let's look at a few contribution maps, generated using hyper-galaxies with different contribution factors.

source_contribution_factor_1 = al.Galaxy(
    redshift=1.0,
    hyper_galaxy=al.HyperGalaxy(contribution_factor=1.0),
    hyper_galaxy_image=hyper_image,
    hyper_model_image=hyper_image,
)

contribution_map = source_contribution_factor_1.hyper_galaxy.contribution_map_from_hyper_images(
    hyper_model_image=hyper_image, hyper_galaxy_image=hyper_image)

aplt.array(
    array=contribution_map,
    mask=mask,
    plotter=aplt.Plotter(labels=aplt.Labels(title="Contribution Map")),
)
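
# A minimal sketch of how the contribution factor enters, following the HowToLens description (treat the exact
# formula as an assumption rather than the library's verbatim implementation):
def contribution_map_sketch(hyper_model_image, hyper_galaxy_image, contribution_factor):
    # Each pixel's contribution is the galaxy's fraction of the model flux, damped by the contribution factor and
    # normalized to a maximum of 1; larger factors down-weight the fainter regions of the galaxy.
    contributions = hyper_galaxy_image / (hyper_model_image + contribution_factor)
    return contributions / np.max(contributions)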

source_contribution_factor_3 = al.Galaxy(
    redshift=1.0,
    hyper_galaxy=al.HyperGalaxy(contribution_factor=3.0),
    hyper_galaxy_image=hyper_image,
    hyper_model_image=hyper_image,
)

contribution_map = source_contribution_factor_3.hyper_galaxy.contribution_map_from_hyper_images(
    hyper_model_image=hyper_image, hyper_galaxy_image=hyper_image)

aplt.array(
    array=contribution_map,
    mask=mask,
    plotter=aplt.Plotter(labels=aplt.Labels(title="Contribution Map")),
)
Example #4
# NOTE: the start of this snippet is truncated; the assignment and 'norm' argument below are assumed
# reconstructions.
cmap = aplt.ColorMap(
    norm="symmetric_log",
    norm_min=1.0e-4,
    norm_max=0.4 * np.max(image),
    linthresh=0.05,
    linscale=0.1,
)

scribbler = scribbler.Scribbler(image=image.in_2d, cmap=cmap)
mask = scribbler.show_mask()
mask = al.mask.manual(mask_2d=mask, pixel_scales=pixel_scales)

# Next, load the imaging noise-map; this is the map whose values we will scale.
noise_map = al.array.from_fits(
    file_path=dataset_path + "noise_map.fits", pixel_scales=pixel_scales
)

# Now let's plot the signal-to-noise map, so we can check the regions of the image the mask covers.
# data_plotters.plot_signal_to_noise_map(signal_to_noise_map=image / noise_map)

# Here, we manually increase the noise values in the masked regions to extremely large values, such that the
# analysis essentially omits them.
noise_map = np.where(mask, 1.0e8, noise_map.in_2d)
noise_map = al.array.manual_2d(array=noise_map, pixel_scales=pixel_scales)

# The signal to noise map is the best way to determine if these regions are appropriately masked out.
aplt.array(array=image / noise_map)

# Now we're happy with the scaled noise-map, let's output it to the dataset folder of the lens, so that we can load
# it from a .fits file in our pipelines!
noise_map.output_to_fits(
    file_path=dataset_path + "noise_map_scaled.fits", overwrite=True
)
Example #5
workspace_path = "{}/../../../".format(
    os.path.dirname(os.path.realpath(__file__)))

# The 'dataset_label' is the name of the dataset folder and 'dataset_name' the folder the lens light centre is
# stored in, e.g. it will be output as '/autolens_workspace/dataset/dataset_label/dataset_name/lens_light_centre.dat'.
dataset_label = "imaging"
dataset_name = "lens_sie__source_sersic"

# Create the path where the lens light centre will be output, which in this case is
# '/autolens_workspace/dataset/imaging/lens_sie__source_sersic/'
dataset_path = af.path_util.make_and_return_path_from_path_and_folder_names(
    path=workspace_path, folder_names=["dataset", dataset_label, dataset_name])

# If you use this tool for your own dataset, you *must* double check this pixel scale is correct!
pixel_scales = 0.1

# First, load the imaging dataset, so that the lens light centre can be plotted over the strong lens image.
image = al.array.from_fits(file_path=dataset_path + "image.fits",
                           pixel_scales=pixel_scales)

# Now, create the lens light centre, which is a list of (y,x) coordinates.
lens_light_centre = al.coordinates([[(0.0, 0.0)]])

# Now let's plot the image and lens light centre, so we can check that the centre overlaps the lens galaxy's light.
aplt.array(array=image, light_profile_centres=lens_light_centre)

# Now we're happy with the lens light centre, let's output it to the dataset folder of the lens, so that we can
# load it from a .dat file in our pipelines!
lens_light_centre.output_to_file(file_path=dataset_path +
                                 "lens_light_centre.dat")
Example #6
# Create the path where the mask will be output, which in this case is
# '/autolens_workspace/dataset/imaging/lens_sie__source_sersic/'
dataset_path = af.path_util.make_and_return_path_from_path_and_folder_names(
    path=workspace_path, folder_names=["dataset", dataset_label, dataset_name])

# If you use this tool for your own dataset, you *must* double check this pixel scale is correct!
pixel_scales = 0.1

# First, load the imaging dataset, so that the mask can be plotted over the strong lens image.
image = al.array.from_fits(file_path=dataset_path + "image.fits",
                           pixel_scales=pixel_scales)

# Now, create a mask for this dataset, using the mask functions we're used to. I'll use a circular-annular mask
# here, but I've commented out other options you might want to use (feel free to experiment!).

mask = al.mask.circular_annular(
    shape_2d=image.shape_2d,
    pixel_scales=image.pixel_scales,
    sub_size=1,
    inner_radius=0.5,
    outer_radius=2.5,
    centre=(0.0, 0.0),
)

# Now let's plot the image and mask, so we can check that the mask includes the regions of the image we want.
aplt.array(array=image, mask=mask)

# Now we're happy with the mask, lets output it to the dataset folder of the lens, so that we can load it from a .fits
# file in our pipelines!
mask.output_to_fits(file_path=dataset_path + "mask.fits", overwrite=True)
Example #7

# NOTE: the opening of this snippet is truncated; the lines below close the construction of a hyper source galaxy
# used in the fit and contribution maps that follow.
    hyper_model_image=hyper_image_source,  # <- The source gets its own hyper-galaxy image.
)

fit = fit_masked_imaging_with_lens_and_source_galaxy(
    masked_imaging=masked_imaging,
    lens_galaxy=lens_galaxy,
    source_galaxy=source_magnification,
)
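# ('fit_masked_imaging_with_lens_and_source_galaxy' is assumed to be a convenience function defined earlier in the
# full tutorial; it fits the masked imaging with a tracer made from the two galaxies.)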

lens_contribution_map = lens_galaxy_hyper.hyper_galaxy.contribution_map_from_hyper_images(
    hyper_model_image=hyper_image, hyper_galaxy_image=hyper_image_lens)

aplt.array(
    array=lens_contribution_map,
    mask=mask,
    plotter=aplt.Plotter(labels=aplt.Labels(title="Lens Contribution Map")),
)

source_contribution_map = source_magnification_hyper.hyper_galaxy.contribution_map_from_hyper_images(
    hyper_model_image=hyper_image, hyper_galaxy_image=hyper_image_source)

aplt.array(
    array=source_contribution_map,
    mask=mask,
    plotter=aplt.Plotter(labels=aplt.Labels(title="Source Contribution Map")),
)

# The contribution maps decompose the image into its different components. Next, we use each contribution
# map to scale different regions of the noise-map. From the fit above it was clear that both the lens and source
# required the noise to be scaled, but their different chi-squared values (> 150 and ~ 30) mean they require
# different levels of noise-scaling.
Example #8

# First, load the imaging dataset and noise-map, so that the mask can be plotted over the strong lens image.
image = al.array.from_fits(file_path=dataset_path + "image.fits",
                           pixel_scales=pixel_scales)

noise_map = al.array.from_fits(file_path=dataset_path + "noise_map.fits",
                               pixel_scales=pixel_scales)

# Create the 2D Gaussian that the image is blurred with. The blurring smooths over noise in the image; without it,
# individual noisy pixels would end up unmasked.
blurring_gaussian = al.kernel.from_gaussian(shape_2d=(31, 31),
                                            pixel_scales=pixel_scales,
                                            sigma=blurring_gaussian_sigma)
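# ('blurring_gaussian_sigma', like 'signal_to_noise_threshold' below, is assumed to be defined earlier in the full
# script. Larger sigma values merge more isolated noisy pixels into the mask, at the cost of a less sharp mask edge.)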

blurred_image = blurring_gaussian.convolved_array_from_array(array=image)

aplt.array(array=blurred_image)

# Now compute the absolute signal-to-noise map of this blurred image, given the noise-map of the observed dataset.
blurred_signal_to_noise_map = blurred_image / noise_map

aplt.array(array=blurred_signal_to_noise_map)

# Now create the mask: all pixels where the blurred signal-to-noise is above the threshold are unmasked (False),
# and all other pixels are masked (True).
mask = np.where(blurred_signal_to_noise_map.in_2d > signal_to_noise_threshold,
                False, True)

mask = al.mask.manual(mask_2d=mask, pixel_scales=pixel_scales, sub_size=1)

aplt.array(array=image, mask=mask)

# Now we're happy with the mask, let's output it to the dataset folder of the lens, so that we can load it from a
# .fits file in our pipelines! (The call below is an assumed completion; the original snippet is truncated here.)
mask.output_to_fits(file_path=dataset_path + "mask.fits", overwrite=True)
Example #9
# '/autolens_workspace/dataset/imaging/lens_sie__source_sersic_intervening_objects/'
dataset_path = af.path_util.make_and_return_path_from_path_and_folder_names(
    path=workspace_path, folder_names=["dataset", dataset_label, dataset_name])

# If you use this tool for your own dataset, you *must* double check this pixel scale is correct!
pixel_scales = 0.1

# First, load the imaging dataset, so that the location of galaxies is clear when scaling the noise-map.
image = al.array.from_fits(file_path=dataset_path + "image.fits",
                           pixel_scales=pixel_scales)

# Next, load the imaging noise-map; this is the map whose values we will scale.
noise_map = al.array.from_fits(file_path=dataset_path + "noise_map.fits",
                               pixel_scales=pixel_scales)

# Now let's plot the signal-to-noise map, so we can check the regions of the image we want to scale.
# data_plotters.plot_signal_to_noise_map(signal_to_noise_map=image / noise_map)

# Here, we manually increase the noise values in the regions covering the intervening objects to extremely large
# values, such that the analysis essentially omits them.
noise_map = noise_map.in_2d
noise_map[25:55, 77:96] = 1.0e8
noise_map[55:85, 3:27] = 1.0e8

# Re-wrap the scaled values as an array with a pixel scale, so they can be plotted and output to .fits below.
noise_map = al.array.manual_2d(array=noise_map, pixel_scales=pixel_scales)

# The signal to noise map is the best way to determine if these regions are appropriately masked out.
aplt.array(array=image / noise_map.in_1d)

# Now we're happy with the scaled noise-map, let's output it to the dataset folder of the lens, so that we can load
# it from a .fits file in our pipelines!
noise_map.output_to_fits(file_path=dataset_path + "noise_map_scaled.fits",
                         overwrite=True)
Example #10
# 2) For the same reason, we increase the size of the figure's title, y and x labels, and axis ticks from the
#    default value 16 to 24.

# 3) For the same reason, we increase the size of the colorbar ticks from the default value 10 to 20.

plotter = aplt.Plotter(
    figure=aplt.Figure(figsize=(12, 12)),
    labels=aplt.Labels(title="SLACS1430+4105 Image",
                       titlesize=24,
                       ysize=24,
                       xsize=24),
    ticks=aplt.Ticks(ysize=24, xsize=24),
    cmap=aplt.ColorMap(norm="cold", norm_max=0.8),
    cb=aplt.ColorBar(ticksize=20),
)

aplt.array(array=image, plotter=plotter)

# The colormap of the array can be changed to any of the standard matplotlib colormaps.

plotter = aplt.Plotter(labels=aplt.Labels(title="SLACS1430+4105 Image"),
                       cmap=aplt.ColorMap(cmap="spring"))

aplt.array(array=image, plotter=plotter)

# We can change the x / y axis units from arc-seconds to kiloparsecs, by inputting a kiloparsec-to-arcsecond
# conversion factor (for SLACS1430+4105, the lens galaxy is at redshift 0.285, corresponding to the conversion
# factor below).

plotter = aplt.Plotter(
    labels=aplt.Labels(title="SLACS1430+4105 Image"),
    units=aplt.Units(in_kpc=True, conversion_factor=6.2),
)

aplt.array(array=image, plotter=plotter)
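
# A sketch of how such a conversion factor can be derived for a given lens redshift (assuming astropy is installed;
# the exact value depends on the adopted cosmology):
from astropy.cosmology import Planck15

kpc_per_arcsec = 1.0 / Planck15.arcsec_per_kpc_proper(z=0.285).value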
Example #11
# Let's look at this in action. We'll inspect 3 cluster weight maps, using a weight_power of 0.0, 5.0 and 10.0,
# setting the weight_floor to 0.0 so that it does not change the cluster weight map.

source_weight_power_0 = al.Galaxy(
    redshift=1.0,
    pixelization=al.pix.VoronoiBrightnessImage(pixels=500,
                                               weight_floor=0.0,
                                               weight_power=0.0),
    regularization=al.reg.Constant(coefficient=1.0),
    hyper_galaxy_image=hyper_image,
)

cluster_weight_power_0 = source_weight_power_0.pixelization.weight_map_from_hyper_image(
    hyper_image=source_weight_power_0.hyper_galaxy_image)

aplt.array(array=cluster_weight_power_0, mask=mask)
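
# A minimal sketch of the weighting scheme described above (an assumption based on the HowToLens description, not
# necessarily the library's exact implementation):
def weight_map_sketch(hyper_image, weight_floor, weight_power):
    # Normalize the hyper image to [0, 1], add the floor term, then raise to the power.
    weight_map = (hyper_image - np.min(hyper_image)) / (
        np.max(hyper_image) - np.min(hyper_image)
    ) + weight_floor * np.max(hyper_image)
    return weight_map ** weight_power

# With weight_power=0.0 every pixel's weight is 1, so the 500 source pixels cluster uniformly over the mask; higher
# powers concentrate them in the brightest regions of the hyper image.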

source_weight_power_5 = al.Galaxy(
    redshift=1.0,
    pixelization=al.pix.VoronoiBrightnessImage(pixels=500,
                                               weight_floor=0.0,
                                               weight_power=5.0),
    regularization=al.reg.Constant(coefficient=1.0),
    hyper_galaxy_image=hyper_image,
)

cluster_weight_power_5 = source_weight_power_5.pixelization.weight_map_from_hyper_image(
    hyper_image=source_weight_power_5.hyper_galaxy_image)

aplt.array(array=cluster_weight_power_5, mask=mask)
Example #12
# Now, create a set of positions, which is a Python list of (y,x) values.
positions = al.coordinates([[(0.8, 1.45), (1.78, -0.4), (-0.95, 1.38), (-0.83, -1.04)]])

# These are the positions for the example lens 'lens_sersic_sie__source_sersic__2'
# positions = al.coordinates([[(0.44, 0.60), (-1.4, -0.41), (0.15, -1.45)]])

# These are the positions for the example lens 'lens_sie__source_sersic__2'
# positions = al.coordinates([[(1.28, -1.35), (-0.5, 0.7)]])

# These are the positions for the example lens 'lens_sie__source_sersic_x2'
# positions = al.coordinates([[(2.16, -1.3), (-0.65, 0.45)]])

# We can in fact input multiple lists of positions (commented out below), which correspond to pixels that are
# anticipated to map to different multiply-imaged regions of the source-plane (e.g. you would need something like
# spectra to be able to do this).
# Images of source 1           # Images of source 2
# positions = [[(1.0, 1.0), (2.0, 0.5)], [(-1.0, -0.1), (2.0, 2.0), (3.0, 3.0)]]

# Now let's plot the image and positions, so we can check that the positions overlap different regions of the source.
aplt.array(array=image, positions=positions)

# Now we're happy with the positions, let's output them to the dataset folder of the lens, so that we can load them
# from a .dat file in our pipelines!
positions.output_to_file(file_path=dataset_path + "positions.dat")


# These commented-out lines would create the positions for the 'example_multi_plane' dataset.
# lens_name = 'example_multi_plane'
# pixel_scales = 0.05
# positions = [[(0.8, 1.12), (-0.64, 1.13), (1.38, -0.2), (-0.72, -0.83)]]