# Customise the priors on the source galaxy's bulge (Sersic) light profile.
# NOTE(review): only centre_1 is assigned here - presumably centre_0 gets its
# prior on a line above this chunk; verify the centre_0 prior is not missing.
bulge.centre.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.3)
bulge.elliptical_comps.elliptical_comps_0 = af.GaussianPrior(mean=0.0, sigma=0.3)
bulge.elliptical_comps.elliptical_comps_1 = af.GaussianPrior(mean=0.0, sigma=0.3)
bulge.intensity = af.GaussianPrior(mean=0.3, sigma=0.3)
bulge.effective_radius = af.GaussianPrior(mean=0.2, sigma=0.2)
bulge.sersic_index = af.GaussianPrior(mean=1.0, sigma=1.0)

# Compose the model: a lens galaxy (mass + external shear) at redshift 0.5
# and a source galaxy (the bulge light profile above) at redshift 1.0.
lens = af.Model(al.Galaxy, redshift=0.5, mass=mass, shear=shear)
source = af.Model(al.Galaxy, redshift=1.0, bulge=bulge)

# Group both galaxies into a collection, then wrap that in the top-level model.
galaxies = af.Collection(lens=lens, source=source)
model = af.Collection(galaxies=galaxies)

# Configure a local-best Particle Swarm Optimisation search.
# Total likelihood evaluations = n_particles * iters = 50 * 1000 = 50000.
search = af.PySwarmsLocal(
    path_prefix=path.join("howtolens", "chapter_optional"),
    name="tutorial_searches_pso",
    unique_tag=dataset_name,
    n_particles=50,
    iters=1000,
)

# NOTE(review): the original message said "Dynesty", but the search executed
# below is PySwarmsLocal; the text is corrected to name the right search.
# Also fixed the garbled "cell with progress" -> "cell will progress" and the
# doubled spaces left by the implicit string concatenation.
print(
    "PySwarms has begun running - checkout the workspace/output"
    " folder for live output of the results, images and lens model."
    " This Jupyter notebook cell will progress once PySwarms has completed - this could take some time!"
)

# Run the PSO search; this call blocks until the model-fit is complete.
result_pso = search.fit(model=model, analysis=analysis)

print("PySwarms has finished run - you may now continue the notebook.")

# Plot the fit of the maximum log likelihood model found by the PSO.
fit_imaging_plotter = aplt.FitImagingPlotter(fit=result_pso.max_log_likelihood_fit)
Esempio n. 2
0
https://github.com/ljvmiranda921/pyswarms
https://pyswarms.readthedocs.io/en/latest/index.html

**PyAutoFit** extends *PySwarms* by allowing runs to be terminated and resumed from the point of termination, as well
as providing different options for the initial distribution of particles.

"""

# %%
# Configure the local-best Particle Swarm Optimizer: 100 particles, each
# performing 1000 iterations of the swarm update.
swarm_initializer = af.InitializerPrior()  # seed particles by drawing from the priors
output_paths = af.Paths(folders=["examples", "complex"])

pso = af.PySwarmsLocal(
    n_particles=100,
    iters=1000,
    cognitive=0.5,
    social=0.3,
    inertia=0.9,
    ftol=-np.inf,  # never terminate early on objective-function tolerance
    initializer=swarm_initializer,
    number_of_cores=1,
    paths=output_paths,
)
result = pso.fit(model=model, analysis=analysis)

# %%
"""
__Result__

The result object returned by PSO is again very similar in structure to previous results.
"""

# %%
Esempio n. 3
0
    def test__loads_from_config_file_correct(self):
        """
        PySwarms searches should honour explicitly-passed constructor
        arguments and fall back to the values in the config files when a
        search is constructed with no arguments.
        """
        # Global-best PSO with every setting passed explicitly.
        pso = af.PySwarmsGlobal(
            prior_passer=af.PriorPasser(sigma=2.0,
                                        use_errors=False,
                                        use_widths=False),
            n_particles=51,
            iters=2001,
            cognitive=0.4,
            social=0.5,
            inertia=0.6,
            initializer=af.InitializerBall(lower_limit=0.2, upper_limit=0.8),
            iterations_per_update=10,
            number_of_cores=2,
        )

        # Explicit arguments must be reflected back by the search object.
        assert pso.prior_passer.sigma == 2.0
        assert pso.prior_passer.use_errors is False
        assert pso.prior_passer.use_widths is False
        assert pso.config_dict_search["n_particles"] == 51
        assert pso.config_dict_search["cognitive"] == 0.4
        assert pso.config_dict_run["iters"] == 2001
        assert isinstance(pso.initializer, af.InitializerBall)
        assert pso.initializer.lower_limit == 0.2
        assert pso.initializer.upper_limit == 0.8
        assert pso.iterations_per_update == 10
        assert pso.number_of_cores == 2

        # No arguments: every setting should come from the config files.
        pso = af.PySwarmsGlobal()

        # NOTE(review): these literals mirror the test config files - update
        # them together with the config if the defaults ever change.
        assert pso.prior_passer.sigma == 3.0
        assert pso.prior_passer.use_errors is True
        assert pso.prior_passer.use_widths is True
        assert pso.config_dict_search["n_particles"] == 50
        assert pso.config_dict_search["cognitive"] == 0.1
        assert pso.config_dict_run["iters"] == 2000
        assert isinstance(pso.initializer, af.InitializerPrior)
        assert pso.iterations_per_update == 11
        assert pso.number_of_cores == 1

        # Local-best PSO with every setting passed explicitly, including the
        # neighbourhood options unique to the local variant.
        pso = af.PySwarmsLocal(
            prior_passer=af.PriorPasser(sigma=2.0,
                                        use_errors=False,
                                        use_widths=False),
            n_particles=51,
            iters=2001,
            cognitive=0.4,
            social=0.5,
            inertia=0.6,
            number_of_k_neighbors=4,
            minkowski_p_norm=1,
            initializer=af.InitializerBall(lower_limit=0.2, upper_limit=0.8),
            iterations_per_update=10,
            number_of_cores=2,
        )

        assert pso.prior_passer.sigma == 2.0
        assert pso.prior_passer.use_errors is False
        assert pso.prior_passer.use_widths is False
        assert pso.config_dict_search["n_particles"] == 51
        assert pso.config_dict_search["cognitive"] == 0.4
        assert pso.config_dict_run["iters"] == 2001
        assert isinstance(pso.initializer, af.InitializerBall)
        assert pso.initializer.lower_limit == 0.2
        assert pso.initializer.upper_limit == 0.8
        assert pso.iterations_per_update == 10
        assert pso.number_of_cores == 2

        # Local-best PSO with no arguments: config-file defaults again.
        pso = af.PySwarmsLocal()

        assert pso.prior_passer.sigma == 3.0
        assert pso.prior_passer.use_errors is True
        assert pso.prior_passer.use_widths is True
        assert pso.config_dict_search["n_particles"] == 50
        assert pso.config_dict_search["cognitive"] == 0.1
        assert pso.config_dict_run["iters"] == 2000
        assert isinstance(pso.initializer, af.InitializerPrior)
        assert pso.iterations_per_update == 11
        assert pso.number_of_cores == 1
parameter in 1D and 2D.
"""
# Visualise the PSO cost history from the search's samples.
pyswarms_plotter = aplt.PySwarmsPlotter(samples=result.samples)
pyswarms_plotter.cost_history()
"""
__Search__

We can also use a `PySwarmsLocal` to fit the lens model
"""
# Local-best PSO search for the lens model (30 particles x 300 iters).
search = af.PySwarmsLocal(
    path_prefix=path.join("imaging", "searches"),
    name="PySwarmsLocal",
    unique_tag=dataset_name,
    n_particles=30,
    iters=300,
    cognitive=0.5,
    social=0.3,
    inertia=0.9,
    ftol=-np.inf,  # disable early termination on objective tolerance
    iterations_per_update=1000,
    number_of_cores=1,
)

# Run the fit, then plot the PSO cost history of the resulting samples.
result = search.fit(model=model, analysis=analysis)

pyswarms_plotter = aplt.PySwarmsPlotter(samples=result.samples)
pyswarms_plotter.cost_history()
"""
Finish.
"""
"""
__Search__

We now create and run the `PySwarmsLocal` object which acts as our non-linear search. 

We manually specify all of the PySwarms settings, descriptions of which are provided at the following webpage:

 https://pyswarms.readthedocs.io/en/latest/api/pyswarms.single.html#module-pyswarms.single.local_best
"""
# Local-best PSO with all PySwarms settings specified manually (see the
# pyswarms.single.local_best webpage referenced above for their meaning).
pso = af.PySwarmsLocal(
    path_prefix="searches",
    name="PySwarmsLocal",
    n_particles=50,
    iters=1000,
    cognitive=0.5,  # attraction of a particle to its own best position (c1)
    social=0.3,  # attraction of a particle to its neighbourhood best (c2)
    inertia=0.9,  # velocity carried over between iterations (w)
    number_of_k_neighbors=3,  # neighbourhood size used for the local best (k)
    minkowski_p_norm=2,  # p=2: Euclidean distance between particles
    ftol=-np.inf,  # never stop early on objective-function tolerance
    iterations_per_update=1000,
    number_of_cores=1,
)

result = pso.fit(model=model, analysis=analysis)
"""
__Result__

The result object returned by the fit provides information on the results of the non-linear search. Lets use it to
compare the maximum log likelihood `Gaussian` to the data.
"""
model_data = result.max_log_likelihood_instance.profile_from_xvalues(
Esempio n. 6
0
Unlike Dynesty, this algorithm requires us to specify how many iterations it should perform to find the global 
maximum. Here, an iteration is the number of samples performed by every particle, so the total number of
iterations is n_particles * iters. Let's try a total of 50000 iterations (50 particles x 1000 iters each), far fewer than our Dynesty runs above. 
"""

# %%
# Phase fitting the lens model with a PSO search (50 particles x 1000 iters):
# Sersic lens light + isothermal mass, with a Sersic source.
phase_pso = al.PhaseImaging(
    phase_name="phase_t7_pso",
    settings=settings,
    galaxies=dict(
        lens=al.GalaxyModel(
            redshift=0.5, light=al.lp.EllipticalSersic, mass=al.mp.EllipticalIsothermal
        ),
        source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
    ),
    search=af.PySwarmsLocal(n_particles=50, iters=1000),
)

# NOTE(review): corrected "Dynesty" -> "PySwarms" (the phase above uses a
# PySwarmsLocal search) and the garbled "cell with progress" -> "cell will
# progress".
print(
    "PySwarms has begun running - checkout the workspace/output"
    " folder for live output of the results, images and lens model."
    " This Jupyter notebook cell will progress once PySwarms has completed - this could take some time!"
)

# The phase run is disabled in this cell; uncomment to execute the fit.
# result_pso = phase_pso.run(dataset=imaging, mask=mask)

print("PySwarms has finished run - you may now continue the notebook.")

# Plotting of the best fit goes here once the run above is enabled:
# # aplt.FitImaging.subplot_fit_imaging(fit=result_pso.max_log_likelihood_fit)

# %%
wasting time mapping out in intricate detail every facet of parameter space. Let's see how much faster we can find a 
good fit to the lens data using an optimizer.

we'll use the `Particle Swarm Optimizer` PySwarms. Conceptually this works quite similarly to Dynesty: it has a set of 
points in parameter space (called `particles`) and it uses their likelihoods to determine where it thinks the higher
likelihood regions of parameter space are. 

Unlike Dynesty, this algorithm requires us to specify how many iterations it should perform to find the global 
maximum. Here, an iteration is the number of samples performed by every particle, so the total number of
iterations is n_particles * iters. Let's try a total of 50000 iterations (50 particles x 1000 iters each), far fewer than our Dynesty runs above. 
"""

# %%
# Same PSO phase, with the galaxies grouped in an af.CollectionPriorModel:
# Sersic bulge + isothermal mass for the lens, Sersic bulge for the source.
phase_pso = al.PhaseImaging(
    search=af.PySwarmsLocal(path_prefix="howtolens",
                            name="phase_t6_pso",
                            n_particles=50,
                            iters=1000),
    settings=settings,
    galaxies=af.CollectionPriorModel(
        lens=al.GalaxyModel(redshift=0.5,
                            bulge=al.lp.EllipticalSersic,
                            mass=al.mp.EllipticalIsothermal),
        source=al.GalaxyModel(redshift=1.0, bulge=al.lp.EllipticalSersic),
    ),
)

# NOTE(review): corrected "Dynesty" -> "PySwarms" (the phase above runs a
# PySwarmsLocal search), fixed "cell with progress" -> "cell will progress",
# and removed the doubled spaces from the implicit string concatenation.
print(
    "PySwarms has begun running - checkout the workspace/output"
    " folder for live output of the results, images and lens model."
    " This Jupyter notebook cell will progress once PySwarms has completed - this could take some time!"
)
Esempio n. 8
0
we'll use the Particle Swarm Optimization algorithm PySwarms. For a full description of PySwarms, checkout its Github 
and readthedocs webpages:

https://github.com/ljvmiranda921/pyswarms
https://pyswarms.readthedocs.io/en/latest/index.html

**PyAutoFit** extends *PySwarms* by allowing runs to be terminated and resumed from the point of termination, as well
as providing different options for the initial distribution of particles.

"""
# Local-best PSO: 100 particles x 1000 iterations, with the swarm's initial
# positions drawn from the model priors. Settings are gathered in a dict so
# they read as one configuration unit.
pso_settings = dict(
    path_prefix=path_prefix,
    name="PySwarmsLocal",
    n_particles=100,
    iters=1000,
    cognitive=0.5,
    social=0.3,
    inertia=0.9,
    ftol=-np.inf,  # never terminate early on objective-function tolerance
    initializer=af.InitializerPrior(),
    number_of_cores=1,
)
pso = af.PySwarmsLocal(**pso_settings)
result = pso.fit(model=model, analysis=analysis)
"""
__Result__

The result object returned by PSO is again very similar in structure to previous results.
"""
# Pull out the maximum log likelihood model and evaluate its Gaussian on the
# same x grid as the data.
instance = result.max_log_likelihood_instance

xvalues = np.arange(data.shape[0])
model_gaussian = instance.gaussian.profile_1d_via_xvalues_from(xvalues=xvalues)
we'll use the Particle Swarm Optimization algorithm PySwarms. For a full description of PySwarms, checkout its Github 
and readthedocs webpages:

https://github.com/ljvmiranda921/pyswarms
https://pyswarms.readthedocs.io/en/latest/index.html

**PyAutoFit** extends *PySwarms* by allowing runs to be terminated and resumed from the point of termination, as well
as providing different options for the initial distribution of particles.

"""
# PSO search writing output under overview/complex/fit. NOTE(review): the
# `session` argument presumably directs results into a database session
# rather than only the filesystem - confirm against the caller that creates it.
pso = af.PySwarmsLocal(
    path_prefix=path.join("overview", "complex", "fit"),
    n_particles=100,
    iters=1000,
    cognitive=0.5,
    social=0.3,
    inertia=0.9,
    ftol=-np.inf,  # disable early termination on objective tolerance
    initializer=af.InitializerPrior(),  # initial particles drawn from the priors
    number_of_cores=1,
    session=session,
)
result = pso.fit(model=model, analysis=analysis)
"""
__Result__

The result object returned by PSO is again very similar in structure to previous results.
"""
# Evaluate the maximum log likelihood Gaussian over the data's x grid so it
# can be compared against the observed data.
instance = result.max_log_likelihood_instance

model_gaussian = instance.gaussian.profile_from_xvalues(
    xvalues=np.arange(data.shape[0]))