Example #1
    def test__one_set_of_fluxes__residuals_likelihood_correct(self):

        tracer = mock.MockTracer(
            magnification=al.ValuesIrregular([2.0, 2.0]),
            profile=al.ps.PointSourceFlux(flux=2.0),
        )

        fluxes = al.ValuesIrregular([1.0, 2.0])
        noise_map = al.ValuesIrregular([3.0, 1.0])
        positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])

        fit = al.FitFluxes(
            name="point_0",
            fluxes=fluxes,
            noise_map=noise_map,
            positions=positions,
            tracer=tracer,
        )

        assert fit.fluxes.in_list == [1.0, 2.0]
        assert fit.noise_map.in_list == [3.0, 1.0]
        assert fit.model_fluxes.in_list == [4.0, 4.0]
        assert fit.residual_map.in_list == [-3.0, -2.0]
        assert fit.normalized_residual_map.in_list == [-1.0, -2.0]
        assert fit.chi_squared_map.in_list == [1.0, 4.0]
        assert fit.chi_squared == pytest.approx(5.0, 1.0e-4)
        assert fit.noise_normalization == pytest.approx(5.87297, 1.0e-4)
        assert fit.log_likelihood == pytest.approx(-5.43648, 1.0e-4)
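
The asserted values follow from a standard Gaussian log-likelihood. A minimal NumPy sketch of that arithmetic, written against plain arrays rather than the `al` API and only assuming this is how `FitFluxes` defines its terms:

import numpy as np

fluxes = np.array([1.0, 2.0])
model_fluxes = np.array([4.0, 4.0])  # flux=2.0 magnified by 2.0 at each position
noise_map = np.array([3.0, 1.0])

residual_map = fluxes - model_fluxes                # [-3.0, -2.0]
normalized_residual_map = residual_map / noise_map  # [-1.0, -2.0]
chi_squared = np.sum(normalized_residual_map**2.0)  # 5.0
noise_normalization = np.sum(np.log(2.0 * np.pi * noise_map**2.0))  # ~5.87297
log_likelihood = -0.5 * (chi_squared + noise_normalization)         # ~-5.43648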
Example #2
    def test__furthest_separation_of_source_plane_positions(self):

        positions = al.Grid2DIrregular(grid=[(0.0, 0.0), (0.0, 1.0)])
        noise_map = al.ValuesIrregular([1.0, 1.0])

        tracer = mock.MockTracer(traced_grid=positions)
        fit = al.FitPositionsSourceMaxSeparation(positions=positions,
                                                 noise_map=noise_map,
                                                 tracer=tracer)

        assert fit.furthest_separations_of_source_plane_positions.in_list == [
            1.0, 1.0
        ]
        assert fit.max_separation_of_source_plane_positions == 1.0
        assert fit.max_separation_within_threshold(threshold=2.0)
        assert not fit.max_separation_within_threshold(threshold=0.5)

        positions = al.Grid2DIrregular(grid=[(0.0, 0.0), (0.0, 1.0), (0.0, 3.0)])
        noise_map = al.ValuesIrregular([1.0, 1.0, 1.0])

        tracer = mock.MockTracer(traced_grid=positions)
        fit = al.FitPositionsSourceMaxSeparation(positions=positions,
                                                 noise_map=noise_map,
                                                 tracer=tracer)

        assert fit.furthest_separations_of_source_plane_positions.in_list == [
            3.0,
            2.0,
            3.0,
        ]
        assert fit.max_separation_of_source_plane_positions == 3.0
        assert fit.max_separation_within_threshold(threshold=3.5)
        assert not fit.max_separation_within_threshold(threshold=2.0)
        assert not fit.max_separation_within_threshold(threshold=0.5)
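
The `furthest_separations_of_source_plane_positions` values can be checked by hand: for each traced position, take the largest Euclidean distance to any other traced position. A minimal NumPy sketch of that arithmetic, assuming this definition rather than quoting the library's implementation:

import numpy as np

traced = np.array([(0.0, 0.0), (0.0, 1.0), (0.0, 3.0)])

# Pairwise distances between all traced positions, then the maximum per row.
separations = np.linalg.norm(traced[:, None, :] - traced[None, :, :], axis=-1)
furthest = separations.max(axis=1)  # [3.0, 2.0, 3.0]
max_separation = furthest.max()     # 3.0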
Example #3
def make_dataset():
    return al.PointSourceDataset(
        "name",
        positions=al.Grid2DIrregular([(1, 2)]),
        positions_noise_map=al.ValuesIrregular([1]),
        fluxes=al.ValuesIrregular([2]),
        fluxes_noise_map=al.ValuesIrregular([3]),
    )
Example #4
    def test__multi_plane_calculation(self, gal_x1_mp):

        g0 = al.Galaxy(redshift=0.5,
                       mass=al.mp.SphIsothermal(einstein_radius=1.0))
        g1 = al.Galaxy(redshift=1.0, point_0=al.ps.PointSourceFlux(flux=1.0))
        g2 = al.Galaxy(redshift=2.0, point_1=al.ps.PointSourceFlux(flux=2.0))

        tracer = al.Tracer.from_galaxies(galaxies=[g0, g1, g2])

        fluxes = al.ValuesIrregular([1.0])
        noise_map = al.ValuesIrregular([3.0])
        positions = al.Grid2DIrregular([(2.0, 0.0)])

        fit_0 = al.FitFluxes(
            name="point_0",
            fluxes=fluxes,
            noise_map=noise_map,
            positions=positions,
            tracer=tracer,
        )

        deflections_func = partial(tracer.deflections_between_planes_from_grid,
                                   plane_i=0,
                                   plane_j=1)

        magnification_0 = tracer.magnification_via_hessian_from_grid(
            grid=positions, deflections_func=deflections_func)

        assert fit_0.magnifications[0] == magnification_0

        fit_1 = al.FitFluxes(
            name="point_1",
            fluxes=fluxes,
            noise_map=noise_map,
            positions=positions,
            tracer=tracer,
        )

        deflections_func = partial(tracer.deflections_between_planes_from_grid,
                                   plane_i=0,
                                   plane_j=2)

        magnification_1 = tracer.magnification_via_hessian_from_grid(
            grid=positions, deflections_func=deflections_func)

        assert fit_1.magnifications[0] == magnification_1

        assert fit_0.magnifications[0] != pytest.approx(
            fit_1.magnifications[0], 1.0e-1)
Example #5
    def test__same_as_above_with_real_tracer(self):

        tracer = al.Tracer.from_galaxies(
            galaxies=[
                al.Galaxy(
                    redshift=0.5, mass=al.mp.SphericalIsothermal(einstein_radius=1.0)
                ),
                al.Galaxy(redshift=1.0),
            ]
        )

        noise_map = al.ValuesIrregular([1.0, 1.0])

        positions = al.Grid2DIrregular([(1.0, 0.0), (-1.0, 0.0)])
        fit = al.FitPositionsSourceMaxSeparation(
            positions=positions, noise_map=noise_map, tracer=tracer
        )
        assert fit.max_separation_within_threshold(threshold=0.01)

        positions = al.Grid2DIrregular([(1.2, 0.0), (-1.0, 0.0)])
        fit = al.FitPositionsSourceMaxSeparation(
            positions=positions, noise_map=noise_map, tracer=tracer
        )
        assert fit.max_separation_within_threshold(threshold=0.3)
        assert not fit.max_separation_within_threshold(threshold=0.15)
Example #6
def test__multi_plane_position_solving():

    g0 = al.Galaxy(redshift=0.5, mass=al.mp.SphIsothermal(einstein_radius=1.0))
    g1 = al.Galaxy(redshift=1.0, point_0=al.ps.Point(centre=(0.1, 0.1)))
    g2 = al.Galaxy(redshift=2.0, point_1=al.ps.Point(centre=(0.1, 0.1)))

    tracer = al.Tracer.from_galaxies(galaxies=[g0, g1, g2])

    positions = al.Grid2DIrregular([(0.0, 1.0), (0.0, 2.0)])
    noise_map = al.ValuesIrregular([0.5, 1.0])

    traced_grids = tracer.traced_grid_2d_list_from(grid=positions)

    fit_0 = al.FitPositionsSource(name="point_0",
                                  positions=positions,
                                  noise_map=noise_map,
                                  tracer=tracer)

    assert fit_0.model_positions[0, 1] == pytest.approx(0.326054, 1.0e-1)
    assert fit_0.model_positions[1, 1] == pytest.approx(1.326054, 1.0e-1)

    assert (fit_0.model_positions == traced_grids[1]).all()

    fit_1 = al.FitPositionsSource(name="point_1",
                                  positions=positions,
                                  noise_map=noise_map,
                                  tracer=tracer)

    assert (fit_1.model_positions == traced_grids[2]).all()
Example #7
    def test__more_model_positions_than_data_positions__pairs_closest_positions(
            self):

        g0 = al.Galaxy(redshift=1.0,
                       point_0=al.ps.PointSource(centre=(0.1, 0.1)))

        tracer = al.Tracer.from_galaxies(
            galaxies=[al.Galaxy(redshift=0.5), g0])

        positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
        noise_map = al.ValuesIrregular([0.5, 1.0])
        model_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0),
                                              (1.0, 0.0), (0.0, 1.0)])

        positions_solver = mock.MockPositionsSolver(
            model_positions=model_positions)

        fit = al.FitPositionsImage(
            name="point_0",
            positions=positions,
            noise_map=noise_map,
            tracer=tracer,
            positions_solver=positions_solver,
        )

        assert fit.model_positions.in_list == [(1.0, 0.0), (2.0, 3.0)]
        assert fit.noise_map.in_list == [0.5, 1.0]
        assert fit.residual_map.in_list == [1.0, np.sqrt(2.0)]
        assert fit.normalized_residual_map.in_list == [2.0, np.sqrt(2.0) / 1.0]
        assert fit.chi_squared_map.in_list == [4.0, np.sqrt(2.0)**2.0]
        assert fit.chi_squared == pytest.approx(6.0, 1.0e-4)
        assert fit.noise_normalization == pytest.approx(2.289459, 1.0e-4)
        assert fit.log_likelihood == pytest.approx(-4.144729, 1.0e-4)
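
The pairing and likelihood arithmetic can be reproduced independently: each data position is paired with its closest model position, the residual is the Euclidean separation of that pair, and the same Gaussian likelihood as in Example #1 is applied. A sketch under that assumption:

import numpy as np

data = np.array([(0.0, 0.0), (3.0, 4.0)])
noise = np.array([0.5, 1.0])
model = np.array([(3.0, 1.0), (2.0, 3.0), (1.0, 0.0), (0.0, 1.0)])

# Distance from every data position to every model position; pair with the closest.
distances = np.linalg.norm(data[:, None, :] - model[None, :, :], axis=-1)
residual_map = distances.min(axis=1)                            # [1.0, sqrt(2.0)]
chi_squared = np.sum((residual_map / noise) ** 2.0)             # 6.0
noise_normalization = np.sum(np.log(2.0 * np.pi * noise**2.0))  # ~2.289459
log_likelihood = -0.5 * (chi_squared + noise_normalization)     # ~-4.144729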
Example #8
    def test__use_real_tracer(self, gal_x1_mp):

        point_source = al.ps.PointSourceFlux(centre=(0.1, 0.1), flux=2.0)
        galaxy_point_source = al.Galaxy(redshift=1.0, point_0=point_source)
        tracer = al.Tracer.from_galaxies(galaxies=[gal_x1_mp, galaxy_point_source])

        fluxes = al.ValuesIrregular([1.0, 2.0])
        noise_map = al.ValuesIrregular([3.0, 1.0])
        positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])

        fit = al.FitFluxes(
            fluxes=fluxes, noise_map=noise_map, positions=positions, tracer=tracer
        )

        assert fit.model_fluxes.in_list[1] == pytest.approx(2.5, 1.0e-4)
        assert fit.log_likelihood == pytest.approx(-3.11702, 1.0e-4)
Example #9
def test__from_json_and_output_to_json():

    point_dataset_0 = al.PointDataset(
        name="source_1",
        positions=al.Grid2DIrregular([[1.0, 1.0]]),
        positions_noise_map=al.ValuesIrregular([1.0]),
    )

    point_dataset_1 = al.PointDataset(
        name="source_2",
        positions=al.Grid2DIrregular([[1.0, 1.0]]),
        positions_noise_map=al.ValuesIrregular([1.0]),
        fluxes=al.ValuesIrregular([2.0, 3.0]),
        fluxes_noise_map=al.ValuesIrregular([4.0, 5.0]),
    )

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0, point_dataset_1])

    dir_path = path.join(path.dirname(path.realpath(__file__)), "files")

    if path.exists(dir_path):
        shutil.rmtree(dir_path)

    os.makedirs(dir_path)

    file_path = path.join(dir_path, "point_dict.json")

    point_dict.output_to_json(file_path=file_path, overwrite=True)

    point_dict_via_json = al.PointDict.from_json(file_path=file_path)

    assert point_dict_via_json["source_1"].name == "source_1"
    assert point_dict_via_json["source_1"].positions.in_list == [(1.0, 1.0)]
    assert point_dict_via_json["source_1"].positions_noise_map.in_list == [1.0]
    assert point_dict_via_json["source_1"].fluxes is None
    assert point_dict_via_json["source_1"].fluxes_noise_map is None

    assert point_dict_via_json["source_2"].name == "source_2"
    assert point_dict_via_json["source_2"].positions.in_list == [(1.0, 1.0)]
    assert point_dict_via_json["source_2"].positions_noise_map.in_list == [1.0]
    assert point_dict_via_json["source_2"].fluxes.in_list == [2.0, 3.0]
    assert point_dict_via_json["source_2"].fluxes_noise_map.in_list == [4.0, 5.0]
Example #10
def test__point_dataset_structures_as_dict():

    point_dataset_0 = al.PointDataset(
        name="source_1",
        positions=al.Grid2DIrregular([[1.0, 1.0]]),
        positions_noise_map=al.ValuesIrregular([1.0]),
    )

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0])

    assert point_dict["source_1"].name == "source_1"
    assert point_dict["source_1"].positions.in_list == [(1.0, 1.0)]
    assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
    assert point_dict["source_1"].fluxes is None
    assert point_dict["source_1"].fluxes_noise_map is None

    point_dataset_1 = al.PointDataset(
        name="source_2",
        positions=al.Grid2DIrregular([[1.0, 1.0]]),
        positions_noise_map=al.ValuesIrregular([1.0]),
        fluxes=al.ValuesIrregular([2.0, 3.0]),
        fluxes_noise_map=al.ValuesIrregular([4.0, 5.0]),
    )

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0, point_dataset_1])

    assert point_dict["source_1"].name == "source_1"
    assert point_dict["source_1"].positions.in_list == [(1.0, 1.0)]
    assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
    assert point_dict["source_1"].fluxes is None
    assert point_dict["source_1"].fluxes_noise_map is None

    assert point_dict["source_2"].name == "source_2"
    assert point_dict["source_2"].positions.in_list == [(1.0, 1.0)]
    assert point_dict["source_2"].positions_noise_map.in_list == [1.0]
    assert point_dict["source_2"].fluxes.in_list == [2.0, 3.0]
    assert point_dict["source_2"].fluxes_noise_map.in_list == [4.0, 5.0]

    assert (point_dict.positions_list[0] == np.array([1.0, 1.0])).all()
    assert (point_dict.positions_list[1] == np.array([1.0, 1.0])).all()
Example #11
    def test__multi_plane_position_solving(self):

        grid = al.Grid2D.uniform(shape_native=(100, 100),
                                 pixel_scales=0.05,
                                 sub_size=1)

        g0 = al.Galaxy(redshift=0.5,
                       mass=al.mp.SphIsothermal(einstein_radius=1.0))
        g1 = al.Galaxy(redshift=1.0,
                       point_0=al.ps.PointSource(centre=(0.1, 0.1)))
        g2 = al.Galaxy(redshift=2.0,
                       point_1=al.ps.PointSource(centre=(0.1, 0.1)))

        tracer = al.Tracer.from_galaxies(galaxies=[g0, g1, g2])

        positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
        noise_map = al.ValuesIrregular([0.5, 1.0])

        positions_solver = al.PositionsSolver(grid=grid,
                                              pixel_scale_precision=0.01)

        fit_0 = al.FitPositionsImage(
            name="point_0",
            positions=positions,
            noise_map=noise_map,
            tracer=tracer,
            positions_solver=positions_solver,
        )

        fit_1 = al.FitPositionsImage(
            name="point_1",
            positions=positions,
            noise_map=noise_map,
            tracer=tracer,
            positions_solver=positions_solver,
        )

        scaling_factor = al.util.cosmology.scaling_factor_between_redshifts_from(
            redshift_0=0.5,
            redshift_1=1.0,
            redshift_final=2.0,
            cosmology=tracer.cosmology,
        )

        assert fit_0.model_positions[0, 0] == pytest.approx(
            scaling_factor * fit_1.model_positions[0, 0], 1.0e-1)
        assert fit_0.model_positions[0, 1] == pytest.approx(
            scaling_factor * fit_1.model_positions[0, 1], 1.0e-1)
Example #12
def test__fits_dataset__positions_only():

    point_source = al.ps.Point(centre=(0.1, 0.1))
    galaxy_point_source = al.Galaxy(redshift=1.0, point_0=point_source)

    tracer = al.Tracer.from_galaxies(
        galaxies=[al.Galaxy(redshift=0.5), galaxy_point_source])

    positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
    noise_map = al.ValuesIrregular([0.5, 1.0])
    model_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0)])

    point_solver = al.m.MockPointSolver(model_positions=model_positions)

    point_dataset_0 = al.PointDataset(name="point_0",
                                      positions=positions,
                                      positions_noise_map=noise_map)

    point_dict = al.PointDict(point_dataset_list=[point_dataset_0])

    fit = al.FitPointDict(point_dict=point_dict,
                          tracer=tracer,
                          point_solver=point_solver)

    assert fit["point_0"].positions.log_likelihood == pytest.approx(
        -22.14472, 1.0e-4)
    assert fit["point_0"].flux is None
    assert fit.log_likelihood == fit["point_0"].positions.log_likelihood

    point_dataset_1 = al.PointDataset(name="point_1",
                                      positions=positions,
                                      positions_noise_map=noise_map)

    point_dict = al.PointDict(
        point_dataset_list=[point_dataset_0, point_dataset_1])

    fit = al.FitPointDict(point_dict=point_dict,
                          tracer=tracer,
                          point_solver=point_solver)

    assert fit["point_0"].positions.log_likelihood == pytest.approx(
        -22.14472, 1.0e-4)
    assert fit["point_0"].flux is None
    assert fit["point_1"].positions is None
    assert fit["point_1"].flux is None
    assert fit.log_likelihood == fit["point_0"].positions.log_likelihood
Example #13
def test__model_has_image_and_source_chi_squared__fits_both_correctly():

    galaxy_point_image = al.Galaxy(redshift=1.0,
                                   point_0=al.ps.Point(centre=(0.1, 0.1)))

    galaxy_point_source = al.Galaxy(redshift=1.0,
                                    point_1=al.ps.PointSourceChi(centre=(0.1, 0.1)))

    tracer = al.Tracer.from_galaxies(galaxies=[
        al.Galaxy(redshift=0.5), galaxy_point_image, galaxy_point_source
    ])

    positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
    noise_map = al.ValuesIrregular([0.5, 1.0])
    model_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0)])

    point_solver = al.m.MockPointSolver(model_positions=model_positions)

    point_dataset_0 = al.PointDataset(name="point_0",
                                      positions=positions,
                                      positions_noise_map=noise_map)

    point_dataset_1 = al.PointDataset(name="point_1",
                                      positions=positions,
                                      positions_noise_map=noise_map)

    point_dict = al.PointDict(
        point_dataset_list=[point_dataset_0, point_dataset_1])

    fit = al.FitPointDict(point_dict=point_dict,
                          tracer=tracer,
                          point_solver=point_solver)

    assert isinstance(fit["point_0"].positions, al.FitPositionsImage)
    assert isinstance(fit["point_1"].positions, al.FitPositionsSource)

    assert fit["point_0"].positions.model_positions.in_list == model_positions.in_list
    assert fit["point_1"].positions.model_positions.in_list == positions.in_list
Example #14
    def test__two_sets_of_positions__residuals_likelihood_correct(self):

        point_source = al.ps.PointSourceChi(centre=(0.0, 0.0))
        galaxy_point_source = al.Galaxy(redshift=1.0, point_0=point_source)
        tracer = al.Tracer.from_galaxies(
            galaxies=[al.Galaxy(redshift=0.5), galaxy_point_source])

        positions = al.Grid2DIrregular([(0.0, 1.0), (0.0, 2.0)])
        noise_map = al.ValuesIrregular([0.5, 1.0])

        fit = al.FitPositionsSource(name="point_0",
                                    positions=positions,
                                    noise_map=noise_map,
                                    tracer=tracer)

        assert fit.model_positions.in_list == [(0.0, 1.0), (0.0, 2.0)]
        assert fit.noise_map.in_list == [0.5, 1.0]
        assert fit.residual_map.in_list == [1.0, 2.0]
        assert fit.normalized_residual_map.in_list == [1.0 / 0.5, 2.0 / 1.0]
        assert fit.chi_squared_map.in_list == [(1.0 / 0.5)**2.0, 2.0**2.0]
        assert fit.chi_squared == pytest.approx(8.0, 1.0e-4)
        assert fit.noise_normalization == pytest.approx(2.28945, 1.0e-4)
        assert fit.log_likelihood == pytest.approx(-5.14472988, 1.0e-4)

        galaxy_mass = al.Galaxy(
            redshift=0.5,
            mass=al.mp.SphIsothermal(centre=(0.0, 0.0), einstein_radius=1.0),
        )

        tracer = al.Tracer.from_galaxies(
            galaxies=[galaxy_mass, galaxy_point_source])

        fit = al.FitPositionsSource(name="point_0",
                                    positions=positions,
                                    noise_map=noise_map,
                                    tracer=tracer)

        assert fit.model_positions.in_list == [(0.0, 0.0), (0.0, 1.0)]
        assert fit.log_likelihood == pytest.approx(-1.6447298, 1.0e-4)
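
The second half of this test is straightforward to verify by hand: a spherical isothermal profile with `einstein_radius=1.0` centred at the origin deflects every image-plane position by 1.0 radially towards the centre, so (0.0, 1.0) traces to (0.0, 0.0) and (0.0, 2.0) traces to (0.0, 1.0). A sketch of that deflection arithmetic, assuming the standard SIS formula rather than the library internals:

import numpy as np

positions = np.array([(0.0, 1.0), (0.0, 2.0)])
einstein_radius = 1.0

# SIS deflection: constant magnitude einstein_radius, directed towards the centre.
radii = np.linalg.norm(positions, axis=1)
deflections = einstein_radius * positions / radii[:, None]
traced = positions - deflections  # [(0.0, 0.0), (0.0, 1.0)]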
Example #15
    def test__two_sets_of_positions__residuals_likelihood_correct(self):

        point_source = al.ps.PointSource(centre=(0.1, 0.1))
        galaxy_point_source = al.Galaxy(redshift=1.0, point_0=point_source)
        tracer = al.Tracer.from_galaxies(
            galaxies=[al.Galaxy(redshift=0.5), galaxy_point_source])

        positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
        noise_map = al.ValuesIrregular([0.5, 1.0])
        model_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0)])

        positions_solver = mock.MockPositionsSolver(
            model_positions=model_positions)

        fit = al.FitPositionsImage(
            name="point_0",
            positions=positions,
            noise_map=noise_map,
            tracer=tracer,
            positions_solver=positions_solver,
        )

        assert fit.model_positions.in_list == [(3.0, 1.0), (2.0, 3.0)]

        assert fit.noise_map.in_list == [0.5, 1.0]
        assert fit.residual_map.in_list == [np.sqrt(10.0), np.sqrt(2.0)]
        assert fit.normalized_residual_map.in_list == [
            np.sqrt(10.0) / 0.5,
            np.sqrt(2.0) / 1.0,
        ]
        assert fit.chi_squared_map.in_list == [
            (np.sqrt(10.0) / 0.5)**2,
            np.sqrt(2.0)**2.0,
        ]
        assert fit.chi_squared == pytest.approx(42.0, 1.0e-4)
        assert fit.noise_normalization == pytest.approx(2.28945, 1.0e-4)
        assert fit.log_likelihood == pytest.approx(-22.14472, 1.0e-4)
Example #16
positions = al.Grid2DIrregular(grid=[
    positions.in_list[0],
    positions.in_list[2],
    positions.in_list[3],
    positions.in_list[-1],
])
"""
Use the positions to compute the magnification of the `Tracer` at every position.
"""
magnifications = tracer.magnification_2d_via_hessian_from(grid=positions)
"""
We can now compute the observed fluxes of the `Point`, given that we know how much each is magnified.
"""
flux = 1.0
fluxes = [flux * np.abs(magnification) for magnification in magnifications]
fluxes = al.ValuesIrregular(values=fluxes)
"""
Create a point-source dictionary data object and output this to a `.json` file, which is the format used to load and
analyse the dataset.
"""
point_dataset = al.PointDataset(
    name="point_0",
    positions=positions,
    positions_noise_map=positions.values_via_value_from(
        value=grid.pixel_scale),
    fluxes=fluxes,
    fluxes_noise_map=al.ValuesIrregular(values=[1.0, 1.0, 1.0, 1.0]),
)

point_dict = al.PointDict(point_dataset_list=[point_dataset])
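"""
The call that writes this dictionary to a `.json` file is cut off in this excerpt. Based on
Example #21 it would look something like the following, assuming `dataset_path` is defined
earlier in the script.
"""
point_dict.output_to_json(
    file_path=path.join(dataset_path, "point_dict.json"), overwrite=True
)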
Example #17
    positions_1.in_list[6],
])

print(positions_0)
print(positions_1)
"""
Use the positions to compute the magnification of the `Tracer` at every position.
"""
magnifications_0 = tracer.magnification_2d_via_hessian_from(grid=positions_0)
magnifications_1 = tracer.magnification_2d_via_hessian_from(grid=positions_1)
"""
We can now compute the observed fluxes of the `Point`, given that we know how much each is magnified.
"""
flux = 1.0
fluxes_0 = [flux * np.abs(magnification) for magnification in magnifications_0]
fluxes_0 = al.ValuesIrregular(values=fluxes_0)
fluxes_1 = [flux * np.abs(magnification) for magnification in magnifications_1]
fluxes_1 = al.ValuesIrregular(values=fluxes_1)
"""
We now output the image of this strong lens to `.fits`, which can be used for visualization when performing 
point-source modeling, and to `.png` for general inspection.
"""
visuals_2d = aplt.Visuals2D(multiple_images=[positions_0, positions_1])

tracer_plotter = aplt.TracerPlotter(tracer=tracer,
                                    grid=grid,
                                    visuals_2d=visuals_2d)
tracer_plotter.figures_2d(image=True)

mat_plot_2d = aplt.MatPlot2D(
    output=aplt.Output(path=dataset_path, filename="image_2d", format="fits"))
Example #18
def make_fluxes_noise_map_x2():
    return al.ValuesIrregular(values=[1.0, 1.0])
Example #19
def make_fluxes_x2():
    return al.ValuesIrregular(values=[1.0, 2.0])
Example #20
def make_positions_noise_map_x2():
    return al.ValuesIrregular(values=[1.0, 1.0])
Example #21
ensure every source galaxy in our `PointDataset` has a corresponding component in the model.

For the noise of every position, we use the `pixel-scale` of the observed image. Every individual position has its
own noise-map value, so we use this pixel-scale to construct a noise-map value for every position of each source.
"""
point_dataset_list = []

for id, multiple_images in zip(id_per_source, multiple_image_list_per_source):

    total_images = len(multiple_images)

    point_dataset = al.PointDataset(
        name=f"point_{id}",
        positions=multiple_images,
        positions_noise_map=al.ValuesIrregular(
            values=total_images * [image.pixel_scales[0]]
        ),
    )

    point_dataset_list.append(point_dataset)

"""
The `PointDict` is a dictionary representation of every `PointDataset`. This is the data object that is passed 
to **PyAutoLens** to perform the model-fit.

Below, we create the `PointDict` and write it to a `.json` file so it can be loaded in our modeling script.
"""
point_dict = al.PointDict(point_dataset_list=point_dataset_list)

point_dict.output_to_json(
    file_path=path.join(dataset_path, "point_dict.json"), overwrite=True
)
Example #22
__Lens Modeling__

**PyAutoLens** has full support for modeling strong lens datasets as a point-source. This might be used for analysing
strongly lensed quasars or supernovae, which are so compact we do not observe their extended emission.

To perform point-source modeling, we first create a ``PointDataset`` containing the image-plane (y,x) positions
of each multiple image and their noise values (which would be the resolution of the imaging data in which they 
are observed). 

The positions below correspond to those of an `EllIsothermal` mass model.
"""
point_dataset = al.PointDataset(
    name="point_0",
    positions=al.Grid2DIrregular([[1.1488, -1.1488], [1.109, 1.109],
                                  [-1.109, -1.109], [-1.1488, 1.1488]]),
    positions_noise_map=al.ValuesIrregular([0.05, 0.05, 0.05, 0.05]),
)
"""
__Point Source Dictionary__

In this simple example we model a single point source, which might correspond to one lensed quasar or supernova.
However, **PyAutoLens** supports model-fits to datasets with many lensed point-sources, for example in galaxy clusters.

Each point source dataset is therefore passed into a `PointDict` object before the model-fit is performed. For 
this simple example only one dataset is passed in, but in the galaxy-cluster examples you'll see this object makes it
straightforward to model datasets with many lensed sources.
"""
point_dict = al.PointDict(point_dataset_list=[point_dataset])
"""
We can print the `positions` of this dictionary and dataset, as well as their noise-map values.
"""