Code example #1
import autolens as al


def test__inputs_are_other_python_types__converted_correctly():

    point_dataset_0 = al.PointDataset(
        name="source_1", positions=[[1.0, 1.0]], positions_noise_map=[1.0]
    )

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0])

    assert point_dict["source_1"].name == "source_1"
    assert point_dict["source_1"].positions.in_list == [(1.0, 1.0)]
    assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
    assert point_dict["source_1"].fluxes == None
    assert point_dict["source_1"].fluxes_noise_map == None

    point_dataset_0 = al.PointDataset(
        name="source_1",
        positions=[(1.0, 1.0), (2.0, 2.0)],
        positions_noise_map=[1.0],
        fluxes=[2.0],
        fluxes_noise_map=[3.0],
    )

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0])

    assert point_dict["source_1"].name == "source_1"
    assert point_dict["source_1"].positions.in_list == [(1.0, 1.0), (2.0, 2.0)]
    assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
    assert point_dict["source_1"].fluxes.in_list == [2.0]
    assert point_dict["source_1"].fluxes_noise_map.in_list == [3.0]
Code example #2
import pytest

import autolens as al


def test__fits_dataset__positions_only():

    point_source = al.ps.Point(centre=(0.1, 0.1))
    galaxy_point_source = al.Galaxy(redshift=1.0, point_0=point_source)

    tracer = al.Tracer.from_galaxies(
        galaxies=[al.Galaxy(redshift=0.5), galaxy_point_source])

    positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
    noise_map = al.ValuesIrregular([0.5, 1.0])
    model_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0)])

    point_solver = al.m.MockPointSolver(model_positions=model_positions)

    point_dataset_0 = al.PointDataset(name="point_0",
                                      positions=positions,
                                      positions_noise_map=noise_map)

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0])

    fit = al.FitPointDict(point_dict=point_dict,
                          tracer=tracer,
                          point_solver=point_solver)

    assert fit["point_0"].positions.log_likelihood == pytest.approx(
        -22.14472, 1.0e-4)
    assert fit["point_0"].flux == None
    assert fit.log_likelihood == fit["point_0"].positions.log_likelihood

    point_dataset_1 = al.PointDataset(name="point_1",
                                      positions=positions,
                                      positions_noise_map=noise_map)

    point_dict = al.PointDict(
        point_dataset_list=[point_dataset_0, point_dataset_1])

    fit = al.FitPointDict(point_dict=point_dict,
                          tracer=tracer,
                          point_solver=point_solver)

    assert fit["point_0"].positions.log_likelihood == pytest.approx(
        -22.14472, 1.0e-4)
    assert fit["point_0"].flux == None
    assert fit["point_1"].positions == None
    assert fit["point_1"].flux == None
    assert fit.log_likelihood == fit["point_0"].positions.log_likelihood
Code example #3
def make_dataset():
    return al.PointDataset(
        "name",
        positions=al.Grid2DIrregular([(1, 2)]),
        positions_noise_map=al.ValuesIrregular([1]),
        fluxes=al.ValuesIrregular([2]),
        fluxes_noise_map=al.ValuesIrregular([3]),
    )
Code example #4
def make_point_dataset():
    return al.PointDataset(
        name="point_0",
        positions=make_positions_x2(),
        positions_noise_map=make_positions_noise_map_x2(),
        fluxes=make_fluxes_x2(),
        fluxes_noise_map=make_fluxes_noise_map_x2(),
    )
Code example #5
import os
import shutil
from os import path

import autolens as al


def test__from_json_and_output_to_json():

    point_dataset_0 = al.PointDataset(
        name="source_1",
        positions=al.Grid2DIrregular([[1.0, 1.0]]),
        positions_noise_map=al.ValuesIrregular([1.0]),
    )

    point_dataset_1 = al.PointDataset(
        name="source_2",
        positions=al.Grid2DIrregular([[1.0, 1.0]]),
        positions_noise_map=al.ValuesIrregular([1.0]),
        fluxes=al.ValuesIrregular([2.0, 3.0]),
        fluxes_noise_map=al.ValuesIrregular([4.0, 5.0]),
    )

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0, point_dataset_1])

    dir_path = path.join("{}".format(path.dirname(path.realpath(__file__))), "files")

    if path.exists(dir_path):
        shutil.rmtree(dir_path)

    os.makedirs(dir_path)

    file_path = path.join(dir_path, "point_dict.json")

    point_dict.output_to_json(file_path=file_path, overwrite=True)

    point_dict_via_json = al.PointDict.from_json(file_path=file_path)

    assert point_dict_via_json["source_1"].name == "source_1"
    assert point_dict_via_json["source_1"].positions.in_list == [(1.0, 1.0)]
    assert point_dict_via_json["source_1"].positions_noise_map.in_list == [1.0]
    assert point_dict_via_json["source_1"].fluxes == None
    assert point_dict_via_json["source_1"].fluxes_noise_map == None

    assert point_dict_via_json["source_2"].name == "source_2"
    assert point_dict_via_json["source_2"].positions.in_list == [(1.0, 1.0)]
    assert point_dict_via_json["source_2"].positions_noise_map.in_list == [1.0]
    assert point_dict_via_json["source_2"].fluxes.in_list == [2.0, 3.0]
    assert point_dict_via_json["source_2"].fluxes_noise_map.in_list == [4.0, 5.0]
Code example #6
    def test__figure_of_merit__matches_correct_fit_given_galaxy_profiles(
            self, positions_x2, positions_x2_noise_map):

        point_dataset = al.PointDataset(
            name="point_0",
            positions=positions_x2,
            positions_noise_map=positions_x2_noise_map,
        )

        point_dict = al.PointDict(point_dataset_list=[point_dataset])

        model = af.Collection(
            galaxies=af.Collection(
                lens=al.Galaxy(redshift=0.5, point_0=al.ps.Point(centre=(0.0, 0.0)))
            )
        )

        solver = al.m.MockPointSolver(model_positions=positions_x2)

        analysis = al.AnalysisPoint(point_dict=point_dict, solver=solver)

        instance = model.instance_from_unit_vector([])
        analysis_log_likelihood = analysis.log_likelihood_function(
            instance=instance)

        tracer = analysis.tracer_via_instance_from(instance=instance)

        fit_positions = al.FitPositionsImage(
            name="point_0",
            positions=positions_x2,
            noise_map=positions_x2_noise_map,
            tracer=tracer,
            point_solver=solver,
        )

        assert fit_positions.chi_squared == 0.0
        assert fit_positions.log_likelihood == analysis_log_likelihood

        model_positions = al.Grid2DIrregular([(0.0, 1.0), (1.0, 2.0)])
        solver = al.m.MockPointSolver(model_positions=model_positions)

        analysis = al.AnalysisPoint(point_dict=point_dict, solver=solver)

        analysis_log_likelihood = analysis.log_likelihood_function(
            instance=instance)

        fit_positions = al.FitPositionsImage(
            name="point_0",
            positions=positions_x2,
            noise_map=positions_x2_noise_map,
            tracer=tracer,
            point_solver=solver,
        )

        assert fit_positions.residual_map.in_list == [1.0, 1.0]
        assert fit_positions.chi_squared == 2.0
        assert fit_positions.log_likelihood == analysis_log_likelihood
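The final assertions follow from the usual chi-squared definition, chi_squared = sum((residual / noise) ** 2). A minimal sketch of that arithmetic, assuming the `positions_x2_noise_map` fixture holds unit noise values (an assumption, but one implied by the asserted `chi_squared == 2.0` for residuals of 1.0):

# Hypothetical values consistent with the assertions above.
residual_map = [1.0, 1.0]
noise_map = [1.0, 1.0]  # assumed fixture values

chi_squared = sum((r / n) ** 2.0 for r, n in zip(residual_map, noise_map))
assert chi_squared == 2.0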
Code example #7
def test__model_has_image_and_source_chi_squared__fits_both_correctly():

    galaxy_point_image = al.Galaxy(
        redshift=1.0, point_0=al.ps.Point(centre=(0.1, 0.1))
    )

    galaxy_point_source = al.Galaxy(
        redshift=1.0, point_1=al.ps.PointSourceChi(centre=(0.1, 0.1))
    )

    tracer = al.Tracer.from_galaxies(galaxies=[
        al.Galaxy(redshift=0.5), galaxy_point_image, galaxy_point_source
    ])

    positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
    noise_map = al.ValuesIrregular([0.5, 1.0])
    model_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0)])

    point_solver = al.m.MockPointSolver(model_positions=model_positions)

    point_dataset_0 = al.PointDataset(name="point_0",
                                      positions=positions,
                                      positions_noise_map=noise_map)

    point_dataset_1 = al.PointDataset(name="point_1",
                                      positions=positions,
                                      positions_noise_map=noise_map)

    point_dict = al.PointDict(
        point_dataset_list=[point_dataset_0, point_dataset_1])

    fit = al.FitPointDict(point_dict=point_dict,
                          tracer=tracer,
                          point_solver=point_solver)

    assert isinstance(fit["point_0"].positions, al.FitPositionsImage)
    assert isinstance(fit["point_1"].positions, al.FitPositionsSource)

    assert fit["point_0"].positions.model_positions.in_list == model_positions.in_list
    assert fit["point_1"].positions.model_positions.in_list == positions.in_list
Code example #8
import numpy as np

import autolens as al


def test__point_dataset_structures_as_dict():

    point_dataset_0 = al.PointDataset(
        name="source_1",
        positions=al.Grid2DIrregular([[1.0, 1.0]]),
        positions_noise_map=al.ValuesIrregular([1.0]),
    )

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0])

    assert point_dict["source_1"].name == "source_1"
    assert point_dict["source_1"].positions.in_list == [(1.0, 1.0)]
    assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
    assert point_dict["source_1"].fluxes == None
    assert point_dict["source_1"].fluxes_noise_map == None

    point_dataset_1 = al.PointDataset(
        name="source_2",
        positions=al.Grid2DIrregular([[1.0, 1.0]]),
        positions_noise_map=al.ValuesIrregular([1.0]),
        fluxes=al.ValuesIrregular([2.0, 3.0]),
        fluxes_noise_map=al.ValuesIrregular([4.0, 5.0]),
    )

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0, point_dataset_1])

    assert point_dict["source_1"].name == "source_1"
    assert point_dict["source_1"].positions.in_list == [(1.0, 1.0)]
    assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
    assert point_dict["source_1"].fluxes == None
    assert point_dict["source_1"].fluxes_noise_map == None

    assert point_dict["source_2"].name == "source_2"
    assert point_dict["source_2"].positions.in_list == [(1.0, 1.0)]
    assert point_dict["source_2"].positions_noise_map.in_list == [1.0]
    assert point_dict["source_2"].fluxes.in_list == [2.0, 3.0]
    assert point_dict["source_2"].fluxes_noise_map.in_list == [4.0, 5.0]

    assert (point_dict.positions_list[0] == np.array([1.0, 1.0])).all()
    assert (point_dict.positions_list[1] == np.array([1.0, 1.0])).all()
Code example #9
"""
magnifications = tracer.magnification_2d_via_hessian_from(grid=positions)
"""
We can now compute the observed fluxes of the `Point`, given that we know how much each is magnified.
"""
flux = 1.0
fluxes = [flux * np.abs(magnification) for magnification in magnifications]
fluxes = al.ValuesIrregular(values=fluxes)
"""
Create a point-source dictionary data object and output this to a `.json` file, which is the format used to load and
analyse the dataset.
"""
point_dataset = al.PointDataset(
    name="point_0",
    positions=positions,
    positions_noise_map=positions.values_via_value_from(
        value=grid.pixel_scale),
    fluxes=fluxes,
    fluxes_noise_map=al.ValuesIrregular(values=[1.0, 1.0, 1.0, 1.0]),
)

point_dict = al.PointDict(point_dataset_list=[point_dataset])

point_dict.output_to_json(file_path=path.join(dataset_path, "point_dict.json"),
                          overwrite=True)
"""
__Imaging__

We can now pass this simulator a tracer, which creates the ray-traced image plotted above and simulates it as an
imaging dataset.
"""
imaging = simulator.via_tracer_from(tracer=tracer, grid=grid)
Code example #10
    def test__figure_of_merit__includes_fit_fluxes(self, positions_x2,
                                                   positions_x2_noise_map,
                                                   fluxes_x2,
                                                   fluxes_x2_noise_map):

        point_dataset = al.PointDataset(
            name="point_0",
            positions=positions_x2,
            positions_noise_map=positions_x2_noise_map,
            fluxes=fluxes_x2,
            fluxes_noise_map=fluxes_x2_noise_map,
        )

        point_dict = al.PointDict(point_dataset_list=[point_dataset])

        model = af.Collection(
            galaxies=af.Collection(
                lens=al.Galaxy(
                    redshift=0.5,
                    sis=al.mp.SphIsothermal(einstein_radius=1.0),
                    point_0=al.ps.PointFlux(flux=1.0),
                )
            )
        )

        solver = al.m.MockPointSolver(model_positions=positions_x2)

        analysis = al.AnalysisPoint(point_dict=point_dict, solver=solver)

        instance = model.instance_from_unit_vector([])

        analysis_log_likelihood = analysis.log_likelihood_function(
            instance=instance)

        tracer = analysis.tracer_via_instance_from(instance=instance)

        fit_positions = al.FitPositionsImage(
            name="point_0",
            positions=positions_x2,
            noise_map=positions_x2_noise_map,
            tracer=tracer,
            point_solver=solver,
        )

        fit_fluxes = al.FitFluxes(
            name="point_0",
            fluxes=fluxes_x2,
            noise_map=fluxes_x2_noise_map,
            positions=positions_x2,
            tracer=tracer,
        )

        assert (fit_positions.log_likelihood +
                fit_fluxes.log_likelihood == analysis_log_likelihood)

        model_positions = al.Grid2DIrregular([(0.0, 1.0), (1.0, 2.0)])
        solver = al.m.MockPointSolver(model_positions=model_positions)

        analysis = al.AnalysisPoint(point_dict=point_dict, solver=solver)

        instance = model.instance_from_unit_vector([])
        analysis_log_likelihood = analysis.log_likelihood_function(
            instance=instance)

        fit_positions = al.FitPositionsImage(
            name="point_0",
            positions=positions_x2,
            noise_map=positions_x2_noise_map,
            tracer=tracer,
            point_solver=solver,
        )

        fit_fluxes = al.FitFluxes(
            name="point_0",
            fluxes=fluxes_x2,
            noise_map=fluxes_x2_noise_map,
            positions=positions_x2,
            tracer=tracer,
        )

        assert fit_positions.residual_map.in_list == [1.0, 1.0]
        assert fit_positions.chi_squared == 2.0
        assert (fit_positions.log_likelihood +
                fit_fluxes.log_likelihood == analysis_log_likelihood)
Code example #11
significantly higher precision than the grid we see the image on. In this example, the grid has a pixel scale of 0.05",
whereas we determine our multiple image positions at scales of 0.01"!

__Lens Modeling__

**PyAutoLens** has full support for modeling strong lens datasets as point sources. This might be used to analyse
strongly lensed quasars or supernovae, which are so compact that we do not observe their extended emission.

To perform point-source modeling, we first create a `PointDataset` containing the image-plane (y,x) positions
of each multiple image and their noise values (typically the pixel scale of the imaging data in which they are observed).

The positions below correspond to those of an `EllIsothermal` mass model.
"""
point_dataset = al.PointDataset(
    name="point_0",
    positions=al.Grid2DIrregular([[1.1488, -1.1488], [1.109, 1.109],
                                  [-1.109, -1.109], [-1.1488, 1.1488]]),
    positions_noise_map=al.ValuesIrregular([0.05, 0.05, 0.05, 0.05]),
)
"""
__Point Source Dictionary__

In this simple example we model a single point source, which might correspond to one lensed quasar or supernova.
However, **PyAutoLens** supports model-fits to datasets with many lensed point-sources, for example in galaxy clusters.

Each point source dataset is therefore passed into a `PointDict` object before the model-fit is performed. For 
this simple example only one dataset is passed in, but in the galaxy-cluster examples you'll see this object makes it
straightforward to model datasets with many lensed sources.
"""
point_dict = al.PointDict(point_dataset_list=[point_dataset])
"""
We can print the `positions` of this dictionary and dataset, as well as their noise-map values.
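The excerpt ends before the printing step it describes. A minimal sketch of what that step might look like, using the attribute and key names shown in the earlier code examples (the exact print statements are an assumption, not taken from the source script):

# Sketch only: inspect the dataset stored under the name "point_0".
print(point_dict["point_0"].positions)
print(point_dict["point_0"].positions_noise_map)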
Code example #12
mat_plot_2d = aplt.MatPlot2D(
    output=aplt.Output(path=dataset_path, format="png"))

tracer_plotter = aplt.TracerPlotter(tracer=tracer,
                                    grid=grid,
                                    mat_plot_2d=mat_plot_2d)
tracer_plotter.subplot_tracer()
"""
Create a point-source dictionary data object and output this to a `.json` file, which is the format used to load and
analyse the dataset.
"""
point_dataset_0 = al.PointDataset(
    name="point_0",
    positions=positions_0,
    positions_noise_map=positions_0.values_from_value(value=grid.pixel_scale),
    fluxes=fluxes_0,
    fluxes_noise_map=al.ValuesIrregular(values=[1.0, 1.0, 1.0, 1.0]),
)
point_dataset_1 = al.PointDataset(
    name="point_1",
    positions=positions_1,
    positions_noise_map=positions_1.values_from_value(value=grid.pixel_scale),
    fluxes=fluxes_1,
    fluxes_noise_map=al.ValuesIrregular(values=[1.0, 1.0, 1.0, 1.0]),
)

point_dict = al.PointDict(
    point_dataset_list=[point_dataset_0, point_dataset_1])

point_dict.output_to_json(file_path=path.join(dataset_path, "point_dict.json"),
Code example #13
model is used to fit each `PointDataset`; this name is paired with the name of the `Point` model, and we must therefore
ensure that every source galaxy in our `PointDataset` has a corresponding component in the model.

For the noise of every position, we use the pixel scale of the observed image. Every individual position has its
own noise-map value, so we use this pixel scale to construct a noise-map value for every position of each source.
"""
point_dataset_list = []

for source_id, multiple_images in zip(id_per_source, multiple_image_list_per_source):

    total_images = len(multiple_images)

    point_dataset = al.PointDataset(
        name=f"point_{source_id}",
        positions=multiple_images,
        positions_noise_map=al.ValuesIrregular(
            values=total_images * [image.pixel_scales[0]]
        ),
    )

    point_dataset_list.append(point_dataset)

"""
The `PointDict` is a dictionary representation of every `PointDataset`. This is the data object that is passed
to **PyAutoLens** to perform the model-fit.

Below, we create the `PointDict` and write it to a `.json` file so it can be loaded in our modeling script.
"""
point_dict = al.PointDict(point_dataset_list=point_dataset_list)

point_dict.output_to_json(