def test__stochastic_log_evidences_for_instance(self, masked_imaging_7x7):

        lens_hyper_image = al.Array2D.ones(shape_native=(3, 3),
                                           pixel_scales=0.1)
        lens_hyper_image[4] = 10.0
        source_hyper_image = al.Array2D.ones(shape_native=(3, 3),
                                             pixel_scales=0.1)
        source_hyper_image[4] = 10.0
        hyper_model_image = al.Array2D.full(fill_value=0.5,
                                            shape_native=(3, 3),
                                            pixel_scales=0.1)

        hyper_galaxy_image_path_dict = {
            ("galaxies", "lens"): lens_hyper_image,
            ("galaxies", "source"): source_hyper_image,
        }

        result = mock.MockResult(
            hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
            hyper_model_image=hyper_model_image,
        )

        galaxies = af.ModelInstance()
        galaxies.lens = al.Galaxy(
            redshift=0.5, mass=al.mp.SphIsothermal(einstein_radius=1.0))
        galaxies.source = al.Galaxy(
            redshift=1.0,
            pixelization=al.pix.VoronoiMagnification(shape=(3, 3)),
            regularization=al.reg.Constant(),
        )

        instance = af.ModelInstance()
        instance.galaxies = galaxies

        analysis = al.AnalysisImaging(dataset=masked_imaging_7x7,
                                      hyper_result=result)

        stochastic_log_evidences = analysis.stochastic_log_evidences_for_instance(
            instance=instance)

        assert stochastic_log_evidences is None

        galaxies.source = al.Galaxy(
            redshift=1.0,
            pixelization=al.pix.VoronoiBrightnessImage(pixels=9),
            regularization=al.reg.Constant(),
        )

        instance = af.ModelInstance()
        instance.galaxies = galaxies

        analysis = al.AnalysisImaging(dataset=masked_imaging_7x7,
                                      hyper_result=result)

        stochastic_log_evidences = analysis.stochastic_log_evidences_for_instance(
            instance=instance)

        assert stochastic_log_evidences[0] != stochastic_log_evidences[1]
Example #2
    def test__use_border__determines_if_border_pixel_relocation_is_used(
        self, masked_imaging_7x7
    ):

        model = af.Collection(
            galaxies=af.Collection(
                lens=al.Galaxy(
                    redshift=0.5, mass=al.mp.SphIsothermal(einstein_radius=100.0)
                ),
                source=al.Galaxy(
                    redshift=1.0,
                    pixelization=al.pix.Rectangular(shape=(3, 3)),
                    regularization=al.reg.Constant(coefficient=1.0),
                ),
            )
        )

        masked_imaging_7x7 = masked_imaging_7x7.apply_settings(
            settings=al.SettingsImaging(sub_size_inversion=2)
        )

        analysis = al.AnalysisImaging(
            dataset=masked_imaging_7x7,
            settings_pixelization=al.SettingsPixelization(use_border=True),
        )

        analysis.dataset.grid_inversion[4] = np.array([500.0, 0.0])

        instance = model.instance_from_unit_vector([])
        tracer = analysis.tracer_for_instance(instance=instance)
        fit = analysis.fit_imaging_for_tracer(
            tracer=tracer, hyper_image_sky=None, hyper_background_noise=None
        )

        assert fit.inversion.linear_obj_list[0].source_grid_slim[4][0] == pytest.approx(
            97.19584, 1.0e-2
        )
        assert fit.inversion.linear_obj_list[0].source_grid_slim[4][1] == pytest.approx(
            -3.699999, 1.0e-2
        )

        analysis = al.AnalysisImaging(
            dataset=masked_imaging_7x7,
            settings_pixelization=al.SettingsPixelization(use_border=False),
        )

        analysis.dataset.grid_inversion[4] = np.array([300.0, 0.0])

        instance = model.instance_from_unit_vector([])
        tracer = analysis.tracer_for_instance(instance=instance)
        fit = analysis.fit_imaging_for_tracer(
            tracer=tracer, hyper_image_sky=None, hyper_background_noise=None
        )

        assert fit.inversion.linear_obj_list[0].source_grid_slim[4][0] == pytest.approx(
            200.0, 1.0e-4
        )
Example #3
    def test__results_include_positions__available_as_property(
            self, analysis_imaging_7x7, masked_imaging_7x7,
            samples_with_result):

        result = res.ResultDataset(
            samples=samples_with_result,
            analysis=analysis_imaging_7x7,
            model=None,
            search=None,
        )

        assert result.positions is None

        analysis = al.AnalysisImaging(
            dataset=masked_imaging_7x7,
            positions=al.Grid2DIrregular([[(1.0, 1.0)]]),
            settings_lens=al.SettingsLens(positions_threshold=1.0),
        )

        result = res.ResultDataset(samples=samples_with_result,
                                   analysis=analysis,
                                   model=None,
                                   search=None)

        assert (result.positions[0] == np.array([1.0, 1.0])).all()
    def test__sets_up_hyper_galaxy_images__froms(self, masked_imaging_7x7):

        hyper_galaxy_image_path_dict = {
            ("galaxies", "lens"):
            al.Array2D.ones(shape_native=(3, 3), pixel_scales=1.0),
            ("galaxies", "source"):
            al.Array2D.full(fill_value=2.0,
                            shape_native=(3, 3),
                            pixel_scales=1.0),
        }

        result = mock.MockResult(
            hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
            hyper_model_image=al.Array2D.full(fill_value=3.0,
                                              shape_native=(3, 3),
                                              pixel_scales=1.0),
        )

        analysis = al.AnalysisImaging(dataset=masked_imaging_7x7,
                                      hyper_dataset_result=result)

        assert (analysis.hyper_galaxy_image_path_dict[(
            "galaxies", "lens")].native == np.ones((3, 3))).all()

        assert (analysis.hyper_galaxy_image_path_dict[(
            "galaxies", "source")].native == 2.0 * np.ones((3, 3))).all()

        assert (analysis.hyper_model_image.native == 3.0 * np.ones(
            (3, 3))).all()
    def test__figure_of_merit__includes_hyper_image_and_noise__matches_fit(
            self, masked_imaging_7x7):

        hyper_image_sky = al.hyper_data.HyperImageSky(sky_scale=1.0)
        hyper_background_noise = al.hyper_data.HyperBackgroundNoise(
            noise_scale=1.0)

        lens_galaxy = al.Galaxy(redshift=0.5,
                                light=al.lp.EllSersic(intensity=0.1))

        model = af.Collection(
            hyper_image_sky=hyper_image_sky,
            hyper_background_noise=hyper_background_noise,
            galaxies=af.Collection(lens=lens_galaxy),
        )

        analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)
        instance = model.instance_from_unit_vector([])
        analysis_log_likelihood = analysis.log_likelihood_function(
            instance=instance)

        tracer = analysis.tracer_for_instance(instance=instance)
        fit = al.FitImaging(
            dataset=masked_imaging_7x7,
            tracer=tracer,
            hyper_image_sky=hyper_image_sky,
            hyper_background_noise=hyper_background_noise,
        )

        assert fit.log_likelihood == analysis_log_likelihood
Example #6
    def test__tracer_all_above_weight_gen(self, masked_imaging_7x7, samples,
                                          model):

        path_prefix = "aggregator_tracer_gen"

        database_file = path.join(conf.instance.output_path, "tracer.sqlite")
        result_path = path.join(conf.instance.output_path, path_prefix)

        clean(database_file=database_file, result_path=result_path)

        search = mock.MockSearch(samples=samples,
                                 result=mock.MockResult(model=model,
                                                        samples=samples))
        search.paths = af.DirectoryPaths(path_prefix=path_prefix)
        analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)
        search.fit(model=model, analysis=analysis)

        agg = af.Aggregator.from_database(filename=database_file)
        agg.add_directory(directory=result_path)

        tracer_agg = al.agg.TracerAgg(aggregator=agg)
        tracer_pdf_gen = tracer_agg.all_above_weight_gen(minimum_weight=-1.0)
        weight_pdf_gen = tracer_agg.weights_above_gen(minimum_weight=-1.0)

        i = 0

        for (tracer_gen, weight_gen) in zip(tracer_pdf_gen, weight_pdf_gen):

            for tracer in tracer_gen:

                i += 1

                if i == 1:

                    assert tracer.galaxies[0].redshift == 0.5
                    assert tracer.galaxies[0].light.centre == (1.0, 1.0)
                    assert tracer.galaxies[1].redshift == 1.0

                if i == 2:

                    assert tracer.galaxies[0].redshift == 0.5
                    assert tracer.galaxies[0].light.centre == (10.0, 10.0)
                    assert tracer.galaxies[1].redshift == 1.0

            for weight in weight_gen:

                if i == 0:

                    assert weight == 0.0

                if i == 1:

                    assert weight == 1.0

        assert i == 2

        clean(database_file=database_file, result_path=result_path)
    def test__uses_hyper_fit_correctly(self, masked_imaging_7x7):

        galaxies = af.ModelInstance()
        galaxies.lens = al.Galaxy(redshift=0.5,
                                  light=al.lp.EllSersic(intensity=1.0),
                                  mass=al.mp.SphIsothermal())
        galaxies.source = al.Galaxy(redshift=1.0, light=al.lp.EllSersic())

        instance = af.ModelInstance()
        instance.galaxies = galaxies

        lens_hyper_image = al.Array2D.ones(shape_native=(3, 3),
                                           pixel_scales=0.1)
        lens_hyper_image[4] = 10.0
        hyper_model_image = al.Array2D.full(fill_value=0.5,
                                            shape_native=(3, 3),
                                            pixel_scales=0.1)

        hyper_galaxy_image_path_dict = {("galaxies", "lens"): lens_hyper_image}

        result = mock.MockResult(
            hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
            hyper_model_image=hyper_model_image,
        )

        analysis = al.AnalysisImaging(dataset=masked_imaging_7x7,
                                      hyper_dataset_result=result)

        hyper_galaxy = al.HyperGalaxy(contribution_factor=1.0,
                                      noise_factor=1.0,
                                      noise_power=1.0)

        instance.galaxies.lens.hyper_galaxy = hyper_galaxy

        analysis_log_likelihood = analysis.log_likelihood_function(
            instance=instance)

        g0 = al.Galaxy(
            redshift=0.5,
            light_profile=instance.galaxies.lens.light,
            mass_profile=instance.galaxies.lens.mass,
            hyper_galaxy=hyper_galaxy,
            hyper_model_image=hyper_model_image,
            hyper_galaxy_image=lens_hyper_image,
            hyper_minimum_value=0.0,
        )
        g1 = al.Galaxy(redshift=1.0,
                       light_profile=instance.galaxies.source.light)

        tracer = al.Tracer.from_galaxies(galaxies=[g0, g1])

        fit = al.FitImaging(dataset=masked_imaging_7x7, tracer=tracer)

        assert (fit.tracer.galaxies[0].hyper_galaxy_image == lens_hyper_image
                ).all()
        assert analysis_log_likelihood == fit.log_likelihood
    def test__analysis_no_positions__removes_positions_and_threshold(
            self, masked_imaging_7x7):

        analysis = al.AnalysisImaging(
            dataset=masked_imaging_7x7,
            positions=al.Grid2DIrregular([(1.0, 100.0), (200.0, 2.0)]),
            settings_lens=al.SettingsLens(positions_threshold=0.01),
        )

        assert analysis.no_positions.positions is None
        assert analysis.no_positions.settings_lens.positions_threshold is None
Example #9
def test__positions__likelihood_overwrites__changes_likelihood(
        masked_imaging_7x7):

    lens = al.Galaxy(redshift=0.5, mass=al.mp.SphIsothermal())
    source = al.Galaxy(redshift=1.0, light=al.lp.SphSersic())

    model = af.Collection(galaxies=af.Collection(lens=lens, source=source))

    instance = model.instance_from_unit_vector([])

    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)
    analysis_log_likelihood = analysis.log_likelihood_function(
        instance=instance)

    tracer = analysis.tracer_via_instance_from(instance=instance)

    fit = al.FitImaging(dataset=masked_imaging_7x7, tracer=tracer)

    assert fit.log_likelihood == pytest.approx(analysis_log_likelihood, 1.0e-4)
    assert analysis_log_likelihood == pytest.approx(-6258.043397009, 1.0e-4)

    positions_likelihood = al.PositionsLHPenalty(
        positions=al.Grid2DIrregular([(1.0, 100.0), (200.0, 2.0)]),
        threshold=0.01,
    )

    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7,
                                  positions_likelihood=positions_likelihood)
    analysis_log_likelihood = analysis.log_likelihood_function(
        instance=instance)

    log_likelihood_penalty_base = positions_likelihood.log_likelihood_penalty_base_from(
        dataset=masked_imaging_7x7)
    log_likelihood_penalty = positions_likelihood.log_likelihood_penalty_from(
        tracer=tracer)

    assert analysis_log_likelihood == pytest.approx(
        log_likelihood_penalty_base - log_likelihood_penalty, 1.0e-4)
    assert analysis_log_likelihood == pytest.approx(-22048700558.9052, 1.0e-4)
    def test__make_result__result_imaging_is_returned(self,
                                                      masked_imaging_7x7):

        model = af.Collection(galaxies=af.Collection(galaxy_0=al.Galaxy(
            redshift=0.5)))

        search = mock.MockSearch(name="test_search")

        analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)

        result = search.fit(model=model, analysis=analysis)

        assert isinstance(result, res.ResultImaging)
Example #11
def test__modify_before_fit__inversion_no_positions_likelihood__raises_exception(
        masked_imaging_7x7):

    lens = al.Galaxy(redshift=0.5, mass=al.mp.SphIsothermal())
    source = al.Galaxy(redshift=1.0,
                       pixelization=al.pix.Rectangular,
                       regularization=al.reg.Constant())

    model = af.Collection(galaxies=af.Collection(lens=lens, source=source))

    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)

    with pytest.raises(exc.AnalysisException):
        analysis.modify_before_fit(paths=af.DirectoryPaths(), model=model)

    positions_likelihood = al.PositionsLHPenalty(
        positions=al.Grid2DIrregular([(1.0, 100.0), (200.0, 2.0)]),
        threshold=0.01,
    )

    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7,
                                  positions_likelihood=positions_likelihood)
    analysis.modify_before_fit(paths=af.DirectoryPaths(), model=model)
Example #12
def test__fit_imaging_all_above_weight_gen(masked_imaging_7x7, samples, model):

    path_prefix = "aggregator_fit_imaging_gen"

    database_file = path.join(conf.instance.output_path, "fit_imaging.sqlite")
    result_path = path.join(conf.instance.output_path, path_prefix)

    clean(database_file=database_file, result_path=result_path)

    search = al.m.MockSearch(samples=samples,
                             result=al.m.MockResult(model=model,
                                                    samples=samples))
    search.paths = af.DirectoryPaths(path_prefix=path_prefix)
    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)
    search.fit(model=model, analysis=analysis)

    agg = af.Aggregator.from_database(filename=database_file)
    agg.add_directory(directory=result_path)

    fit_imaging_agg = al.agg.FitImagingAgg(aggregator=agg)
    fit_imaging_pdf_gen = fit_imaging_agg.all_above_weight_gen_from(
        minimum_weight=-1.0)

    i = 0

    for fit_imaging_gen in fit_imaging_pdf_gen:

        for fit_imaging in fit_imaging_gen:

            i += 1

            if i == 1:

                assert fit_imaging.tracer.galaxies[0].redshift == 0.5
                assert fit_imaging.tracer.galaxies[0].light.centre == (1.0,
                                                                       1.0)
                assert fit_imaging.tracer.galaxies[1].redshift == 1.0

            if i == 2:

                assert fit_imaging.tracer.galaxies[0].redshift == 0.5
                assert fit_imaging.tracer.galaxies[0].light.centre == (10.0,
                                                                       10.0)
                assert fit_imaging.tracer.galaxies[1].redshift == 1.0

    assert i == 2

    clean(database_file=database_file, result_path=result_path)
Example #13
def test__figure_of_merit__matches_correct_fit_given_galaxy_profiles(
        masked_imaging_7x7):
    lens = al.Galaxy(redshift=0.5, light=al.lp.EllSersic(intensity=0.1))

    model = af.Collection(galaxies=af.Collection(lens=lens))

    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)
    instance = model.instance_from_unit_vector([])
    analysis_log_likelihood = analysis.log_likelihood_function(
        instance=instance)

    tracer = analysis.tracer_via_instance_from(instance=instance)

    fit = al.FitImaging(dataset=masked_imaging_7x7, tracer=tracer)

    assert fit.log_likelihood == analysis_log_likelihood
Example #14
def test__positions__resample__raises_exception(masked_imaging_7x7):

    model = af.Collection(galaxies=af.Collection(
        lens=al.Galaxy(redshift=0.5, mass=al.mp.SphIsothermal()),
        source=al.Galaxy(redshift=1.0),
    ))

    positions_likelihood = al.PositionsLHResample(
        positions=al.Grid2DIrregular([(1.0, 100.0), (200.0, 2.0)]),
        threshold=0.01,
    )

    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7,
                                  positions_likelihood=positions_likelihood)

    instance = model.instance_from_unit_vector([])

    with pytest.raises(exc.RayTracingException):
        analysis.log_likelihood_function(instance=instance)
Example #15
    def test__positions_do_not_trace_within_threshold__raises_exception(
            self, masked_imaging_7x7):

        model = af.Collection(galaxies=af.Collection(
            lens=al.Galaxy(redshift=0.5, mass=al.mp.SphIsothermal()),
            source=al.Galaxy(redshift=1.0),
        ))

        analysis = al.AnalysisImaging(
            dataset=masked_imaging_7x7,
            positions=al.Grid2DIrregular([(1.0, 100.0), (200.0, 2.0)]),
            settings_lens=al.SettingsLens(positions_threshold=0.01),
        )

        instance = model.instance_from_unit_vector([])

        with pytest.raises(exc.RayTracingException):
            analysis.log_likelihood_function(instance=instance)
Example #16
def test__profile_log_likelihood_function(masked_imaging_7x7):

    lens = al.Galaxy(redshift=0.5, light=al.lp.EllSersic(intensity=0.1))
    source = al.Galaxy(
        redshift=1.0,
        regularization=al.reg.Constant(coefficient=1.0),
        pixelization=al.pix.Rectangular(shape=(3, 3)),
    )

    model = af.Collection(galaxies=af.Collection(lens=lens, source=source))

    instance = model.instance_from_unit_vector([])

    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)

    profiling_dict = analysis.profile_log_likelihood_function(
        instance=instance)

    assert "regularization_term_0" in profiling_dict
    assert "log_det_regularization_matrix_term_0" in profiling_dict
Example #17
def test__tracer_randomly_drawn_via_pdf_gen_from(masked_imaging_7x7, samples,
                                                 model):

    path_prefix = "aggregator_tracer_gen"

    database_file = path.join(conf.instance.output_path, "tracer.sqlite")
    result_path = path.join(conf.instance.output_path, path_prefix)

    clean(database_file=database_file, result_path=result_path)

    search = al.m.MockSearch(samples=samples,
                             result=al.m.MockResult(model=model,
                                                    samples=samples))
    search.paths = af.DirectoryPaths(path_prefix=path_prefix)
    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)
    search.fit(model=model, analysis=analysis)

    agg = af.Aggregator.from_database(filename=database_file)
    agg.add_directory(directory=result_path)

    tracer_agg = al.agg.TracerAgg(aggregator=agg)
    tracer_pdf_gen = tracer_agg.randomly_drawn_via_pdf_gen_from(
        total_samples=2)

    i = 0

    for tracer_gen in tracer_pdf_gen:

        for tracer in tracer_gen:

            i += 1

            assert tracer.galaxies[0].redshift == 0.5
            assert tracer.galaxies[0].light.centre == (10.0, 10.0)
            assert tracer.galaxies[1].redshift == 1.0

    assert i == 2

    clean(database_file=database_file, result_path=result_path)
Example #18
def test__make_result__result_imaging_is_returned(masked_imaging_7x7):

    model = af.Collection(galaxies=af.Collection(galaxy_0=al.Galaxy(
        redshift=0.5)))

    instance = model.instance_from_prior_medians()

    samples = al.m.MockSamples(max_log_likelihood_instance=instance)

    search = al.m.MockSearch(name="test_search", samples=samples)

    analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)

    def modify_after_fit(paths: af.DirectoryPaths,
                         model: af.AbstractPriorModel, result: af.Result):
        pass

    analysis.modify_after_fit = modify_after_fit

    result = search.fit(model=model, analysis=analysis)

    assert isinstance(result, ResultImaging)
Example #19
    def test__check_preloads(self, masked_imaging_7x7):

        conf.instance["general"]["test"]["check_preloads"] = True

        lens_galaxy = al.Galaxy(redshift=0.5, light=al.lp.EllSersic(intensity=0.1))

        model = af.Collection(galaxies=af.Collection(lens=lens_galaxy))

        analysis = al.AnalysisImaging(dataset=masked_imaging_7x7)

        instance = model.instance_from_unit_vector([])
        tracer = analysis.tracer_for_instance(instance=instance)
        fit = al.FitImaging(dataset=masked_imaging_7x7, tracer=tracer)

        analysis.preloads.check_via_fit(fit=fit)

        analysis.preloads.blurred_image = fit.blurred_image

        analysis.preloads.check_via_fit(fit=fit)

        analysis.preloads.blurred_image = fit.blurred_image + 1.0

        with pytest.raises(exc.PreloadsException):
            analysis.preloads.check_via_fit(fit=fit)
Example #20
def test__results_include_positions__available_as_property(
        analysis_imaging_7x7, masked_imaging_7x7, samples_with_result):

    result = res.ResultDataset(samples=samples_with_result,
                               analysis=analysis_imaging_7x7,
                               model=None)

    assert result.positions is None

    positions_likelihood = al.PositionsLHResample(
        positions=al.Grid2DIrregular([(1.0, 100.0), (200.0, 2.0)]),
        threshold=1.0,
    )

    analysis = al.AnalysisImaging(
        dataset=masked_imaging_7x7,
        positions_likelihood=positions_likelihood,
        settings_lens=al.SettingsLens(threshold=1.0),
    )

    result = res.ResultDataset(samples=samples_with_result,
                               analysis=analysis,
                               model=None)

    assert (result.positions[0] == np.array([1.0, 100.0])).all()
model = af.Collection(
    galaxies=af.Collection(
        lens=af.Model(
            al.Galaxy, redshift=0.5, mass=al.mp.EllIsothermal, shear=al.mp.ExternalShear
        ),
        source=af.Model(al.Galaxy, redshift=1.0, bulge=al.lp.EllSersic),
    )
)

search = af.DynestyStatic(
    path_prefix=path_prefix,
    name="search[1]_mass[sie]_source[parametric]",
    unique_tag=dataset_name,
    nlive=50,
)

analysis = al.AnalysisImaging(dataset=imaging)

result_1 = search.fit(model=model, analysis=analysis)

"""
__Model + Search + Analysis + Model-Fit (Search 2)__

We use the results of search 1 to create the lens model fitted in search 2, where:

 - The lens galaxy's total mass distribution is an `EllPowerLaw` with `ExternalShear` [8 parameters: priors 
 initialized from search 1].
 
 - The source galaxy's light is a bulge+disk using two parametric `EllSersic`'s whose centres are shared
 [12 parameters: priors of bulge initialized from search 1].

The number of free parameters and therefore the dimensionality of non-linear parameter space is N=20.
"""
"""
__SOURCE PARAMETRIC PIPELINE (with lens light)__

The SOURCE PARAMETRIC PIPELINE (with lens light) uses three searches to initialize a robust model for the 
source galaxy's light, which in this example:
 
 - Uses a parametric `EllSersic` bulge and `EllExponential` disk with centres aligned for the lens
 galaxy's light.
 
 - Uses an `EllIsothermal` model for the lens's total mass distribution with an `ExternalShear`.

 __Settings__:

 - Mass Centre: Fix the mass profile centre to (0.0, 0.0) (this assumption will be relaxed in the MASS TOTAL PIPELINE).
"""
analysis = al.AnalysisImaging(dataset=imaging)

bulge = af.Model(al.lp.EllSersic)
disk = af.Model(al.lp.EllExponential)
bulge.centre = (0.0, 0.0)
disk.centre = (0.0, 0.0)

source_parametric_results = slam.source_parametric.with_lens_light(
    path_prefix=path_prefix,
    unique_tag=dataset_name,
    analysis=analysis,
    setup_hyper=setup_hyper,
    lens_bulge=bulge,
    lens_disk=disk,
    mass=af.Model(al.mp.EllIsothermal),
    shear=af.Model(al.mp.ExternalShear),
Example #23
    hyper_galaxies_source=False,
    hyper_image_sky=al.hyper_data.HyperImageSky,
    hyper_background_noise=None,
)

"""
__Model-Fits via Searches 1, 2 & 3__

Searches 1, 2 and 3 initialize the lens model by fitting the lens light, then the lens mass + source, and then all
simultaneously. This is identical to the pipeline `chaining/pipelines/light_parametric__mass_total__source_inversion.py`

We can only use the hyper features once we have a good model for the lens and source galaxies, given that they require
hyper-model images of both of these components to perform tasks like scaling their noise or adapting a pixelization
or regularization pattern to the source's unlensed morphology.
"""
analysis = al.AnalysisImaging(dataset=imaging)

bulge = af.Model(al.lp.EllSersic)
disk = af.Model(al.lp.EllExponential)

bulge.centre = disk.centre

model = af.Collection(
    galaxies=af.Collection(
        lens=af.Model(al.Galaxy, redshift=0.5, bulge=bulge, disk=disk)
    )
)

search = af.DynestyStatic(
    path_prefix=path_prefix,
    name="hyper[1]_light[parametric]",
"""
The `Imaging` with the custom mask applied is passed into the `AnalysisImaging` object, ensuring that this is the mask
the model-fit uses. 
"""
model = af.Collection(galaxies=af.Collection(
    lens=af.Model(al.Galaxy, redshift=0.5, mass=al.mp.SphIsothermal),
    source=af.Model(al.Galaxy, redshift=1.0, bulge=al.lp.SphExponential),
))

search = af.DynestyStatic(
    path_prefix=path.join("howtolens", "chapter_2"),
    name="tutorial_5_with_custom_mask",
    unique_tag=dataset_name,
    nlive=40,
    number_of_cores=1,
)

analysis = al.AnalysisImaging(dataset=imaging)

search.fit(model=model, analysis=analysis)
"""
__Discussion__

So, we can choose the mask we use in a model-fit. We know that we want the mask to not remove any of the lensed source 
galaxy's light, but is this the 'right' mask? What is the 'right' mask? Maybe we want a bigger mask? a smaller mask?

When it comes to choosing a mask, we are essentially balancing two things: computational run-time and accuracy. When we
use a bigger mask, the model-fit takes longer to perform. Why? Because a bigger mask includes more image-pixels 
in the analysis, and for every additional image-pixel we have to compute its deflection angles, trace it to the source
plane, fit its light, etc.
 
If run-time was not a consideration we would always choose a bigger mask, for two reasons:
"""
"""
__SOURCE PARAMETRIC PIPELINE (with lens light)__

The SOURCE PARAMETRIC PIPELINE (with lens light) uses three searches to initialize a robust model for the 
source galaxy's light, which in this example:

 - Uses a parametric `EllSersic` bulge.

 - Uses an `EllIsothermal` model for the lens's total mass distribution with an `ExternalShear`.

 __Settings__:

 - Mass Centre: Fix the mass profile centre to (0.0, 0.0) (this assumption will be relaxed in the MASS LIGHT DARK 
 PIPELINE).
"""
analysis = al.AnalysisImaging(dataset=imaging)

bulge = af.Model(al.lp.EllSersic)
bulge.centre = (0.0, 0.0)

source_parametric_results = slam.source_parametric.with_lens_light(
    settings_autofit=settings_autofit,
    analysis=analysis,
    setup_hyper=setup_hyper,
    lens_bulge=bulge,
    lens_disk=None,
    mass=af.Model(al.mp.EllIsothermal),
    shear=af.Model(al.mp.ExternalShear),
    source_bulge=af.Model(al.lp.EllSersic),
    mass_centre=(0.0, 0.0),
    redshift_lens=redshift_lens,
"""
We now iterate through the model list, replacing each slope prior with the shared slope prior.
"""
for model in model_list:

    model.galaxies.lens.mass.slope = slope_shared_prior
"""
__Analysis__

For each dataset we now create a corresponding `AnalysisImaging` class, as we are used to doing for `Imaging` data.
"""
analysis_list = []

for masked_imaging in masked_imaging_list:

    analysis = al.AnalysisImaging(dataset=masked_imaging)

    analysis_list.append(analysis)
"""
__Analysis Factors__

Above, we composed a `model_list` consisting of three lens models which each had a shared `slope` prior. We also 
loaded three datasets which we intend to fit with each of these lens models, setting up each in an `Analysis` class 
that defines how the model is used to fit the data.

We now simply pair each lens model to each `Analysis` class, so that **PyAutoLens** knows that: 

- `model_list[0]` fits `masked_imaging_list[0]` via `analysis_list[0]`.
- `model_list[1]` fits `masked_imaging_list[1]` via `analysis_list[1]`.
- `model_list[2]` fits `masked_imaging_list[2]` via `analysis_list[2]`.

A sketch of this pairing is given below.
"""
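"""
The pairing code does not appear above; this sketch assumes the graphical-modeling names `af.AnalysisFactor` and
`af.FactorGraphModel`, which are assumptions of the illustration rather than code taken from the original script.
"""
analysis_factor_list = []

for model, analysis in zip(model_list, analysis_list):

    # Each factor ties one lens model (sharing the `slope` prior) to the Analysis of its dataset.
    analysis_factor = af.AnalysisFactor(prior_model=model, analysis=analysis)

    analysis_factor_list.append(analysis_factor)

# The factor graph combines the factors into one model whose shared priors are fitted simultaneously, e.g. via
# `search.fit(model=factor_graph.global_prior_model, analysis=factor_graph)`.
factor_graph = af.FactorGraphModel(*analysis_factor_list)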
"""
To prevent these solutions biasing the model-fit we specify a `positions_threshold` of 0.5", which requires that a 
mass model traces the four (y,x) coordinates specified by our positions (that correspond to the brightest regions of the 
lensed source) within 0.5" of one another in the source-plane, else the mass model is discarded and a new 
model is sampled. This removes the unphysical solutions that bias an `Inversion`. 

The threshold of 0.5" is large. For an accurate lens model we would anticipate the positions trace within < 0.01" of
one another. However, we only want the threshold to aid the non-linear search with the choice of mass model, not to 
risk removing genuinely physical models.

Position thresholding is described in more detail in the 
script `autolens_workspace/notebooks/imaging/modeling/customize/positions.py`
"""
analysis = al.AnalysisImaging(
    dataset=imaging,
    positions=positions,
    settings_lens=al.SettingsLens(positions_threshold=0.5),
)
"""
__Model-Fit__

We can now begin the model-fit by passing the model and analysis object to the search, which performs a non-linear
search to find which models fit the data with the highest likelihood.

Checkout the output folder for live outputs of the results of the fit, including on-the-fly visualization of the best 
fit model!
"""
result = search.fit(model=model, analysis=analysis)
"""
__Result__
Example #28
bulge.centre = disk.centre

model = af.Collection(
    galaxies=af.Collection(
        lens=af.Model(al.Galaxy, redshift=0.5, bulge=bulge, disk=disk)
    )
)

search = af.DynestyStatic(
    path_prefix=path_prefix,
    name="search[1]_light[parametric]",
    unique_tag=dataset_name,
    nlive=50,
)

analysis = al.AnalysisImaging(dataset=imaging)

result_1 = search.fit(model=model, analysis=analysis)

"""
__Model + Search + Analysis + Model-Fit (Search 2)__

We use the results of search 1 to create the lens model fitted in search 2, where:

 - The lens galaxy's light and stellar mass is an `EllSersic` bulge and `EllExponential` disk [Parameters 
 fixed to results of search 1].

 - The lens galaxy's dark matter mass distribution is a `EllNFWMCRLudlow` whose centre is aligned with the 
 `EllSersic` bulge and stellar mass model above [3 parameters].

 - The lens mass model also includes an `ExternalShear` [2 parameters].

A sketch of this composition is given below.
"""
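"""
This sketch assumes the light-and-mass profile module `al.lmp`, the `EllNFWMCRLudlow` profile and the
`take_attributes` / `result_1.instance` prior-passing pattern; the source model and search set-up are not shown
above and are omitted here. Treat it as an illustration, not the pipeline's code.
"""
# The bulge and disk light-and-mass profiles are fixed to the maximum likelihood result of search 1.
bulge = af.Model(al.lmp.EllSersic)
bulge.take_attributes(result_1.instance.galaxies.lens.bulge)

disk = af.Model(al.lmp.EllExponential)
disk.take_attributes(result_1.instance.galaxies.lens.disk)

# The dark matter halo's centre is aligned with the bulge's (fixed) centre, leaving 3 free parameters.
dark = af.Model(al.mp.EllNFWMCRLudlow)
dark.centre = bulge.centre
dark.redshift_object = 0.5  # matches the lens redshift used above
dark.redshift_source = 1.0  # illustrative source redshift

lens = af.Model(
    al.Galaxy,
    redshift=0.5,
    bulge=bulge,
    disk=disk,
    dark=dark,
    shear=af.Model(al.mp.ExternalShear),
)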
Example #29
model = af.Collection(
    galaxies=af.Collection(
        lens=af.Model(
            al.Galaxy, redshift=0.5, mass=al.mp.EllIsothermal, shear=al.mp.ExternalShear
        ),
        source=af.Model(al.Galaxy, redshift=1.0, bulge=al.lp.EllSersic),
    )
)

search = af.DynestyStatic(
    path_prefix=path.join("howtolens", "chapter_4"),
    name="search[1]_mass[sie]_source[parametric]",
    unique_tag=dataset_name,
    nlive=50,
)

analysis = al.AnalysisImaging(dataset=imaging)

result_1 = search.fit(model=model, analysis=analysis)

"""
__Model + Search + Analysis + Model-Fit (Search 2)__

We use the results of search 1 to create the lens model fitted in search 2, where:

 - The lens galaxy's total mass distribution is an `EllIsothermal` and `ExternalShear` [Parameters fixed to 
 results of search 1].

 - The source-galaxy's light uses a `DelaunayMagnification` pixelization [2 parameters].

 - This pixelization is regularized using a `Constant` scheme [1 parameter].

A sketch of how search 2 might be composed is given below.
"""
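"""
A sketch of the search 2 composition, assuming the `al.pix.DelaunayMagnification` and `al.reg.Constant` classes named
above and an illustrative search name and `nlive`; it is not the original tutorial's code.
"""
source = af.Model(
    al.Galaxy,
    redshift=1.0,
    pixelization=al.pix.DelaunayMagnification,
    regularization=al.reg.Constant,
)

model = af.Collection(
    galaxies=af.Collection(
        # The lens mass model is fixed to the maximum likelihood result of search 1.
        lens=result_1.instance.galaxies.lens,
        source=source,
    )
)

search = af.DynestyStatic(
    path_prefix=path.join("howtolens", "chapter_4"),
    name="search[2]_mass[sie]_source[inversion]",  # illustrative name
    unique_tag=dataset_name,
    nlive=30,
)

analysis = al.AnalysisImaging(dataset=imaging)

result_2 = search.fit(model=model, analysis=analysis)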
    hyper_galaxies_lens=True,
    hyper_galaxies_source=False,
    hyper_image_sky=al.hyper_data.HyperImageSky,
    hyper_background_noise=None,
)
"""
__Model-Fits via Searches 1, 2 & 3__

Searches 1, 2 and 3 initialize the lens model by fitting the lens light, then the lens mass + source, and then all
simultaneously. This is identical to the pipeline `chaining/pipelines/light_parametric__mass_total__source_inversion.py`

We can only use the hyper features once we have a good model for the lens and source galaxies, given that they require
hyper-model images of both of these components to perform tasks like scaling their noise or adapting a pixelization
or regularization pattern to the source's unlensed morphology.
"""
analysis = al.AnalysisImaging(dataset=imaging)

bulge = af.Model(al.lp.EllSersic)
disk = af.Model(al.lp.EllExponential)

bulge.centre = disk.centre

model = af.Collection(galaxies=af.Collection(
    lens=af.Model(al.Galaxy, redshift=0.5, bulge=bulge, disk=disk)))

search = af.DynestyStatic(
    path_prefix=path.join("howtolens", "chapter_5"),
    name="hyper[1]_light[parametric]",
    unique_tag=dataset_name,
    nlive=50,
)