def test__fit_sub_plot_source_and_lens(fit_source_and_lens, lens_fit_plotter_path,
                                       plot_patch):
    lens_fit_plotters.plot_fit_subplot(fit=fit_source_and_lens, should_plot_mask=True, extract_array_from_mask=True,
                                       zoom_around_mask=True,
                                       output_path=lens_fit_plotter_path, output_format='png')
    assert lens_fit_plotter_path + 'lens_fit.png' in plot_patch.paths
# (The opening arguments of source_galaxy_2 - centre, axis_ratio and phi - are truncated in this
# snippet; the values below are illustrative placeholders.)
source_galaxy_2 = g.Galaxy(light=lp.EllipticalSersic(centre=(0.45, -0.35),
                                                     axis_ratio=0.6,
                                                     phi=90.0,
                                                     intensity=0.03,
                                                     effective_radius=0.3,
                                                     sersic_index=3.5))
source_galaxy_3 = g.Galaxy(light=lp.EllipticalSersic(centre=(-0.05, -0.0),
                                                     axis_ratio=0.9,
                                                     phi=140.0,
                                                     intensity=0.03,
                                                     effective_radius=0.1,
                                                     sersic_index=4.0))

tracer = ray_tracing.TracerImageSourcePlanes(
    lens_galaxies=[lens_galaxy],
    source_galaxies=[
        source_galaxy_0, source_galaxy_1, source_galaxy_2, source_galaxy_3
    ],
    image_plane_grid_stack=lens_data.grid_stack)

true_fit = lens_fit.fit_lens_data_with_tracer(lens_data=lens_data,
                                              tracer=tracer)

lens_fit_plotters.plot_fit_subplot(fit=true_fit)

# And indeed, we see far improved residuals, chi-squareds, etc.

# The moral of this story is that, if the source morphology is complex, there is no way we can build a pipeline to
# fit it. For this tutorial, this was true even though our source model could actually fit the data perfectly. For
# real lenses, the source will be *even more complex* and there is even less hope of getting a good fit :(

# But fear not, PyAutoLens has you covered. In chapter 4, we'll introduce a completely new way to model the source
# galaxy, which addresses the problem faced here. But before that, in the next tutorial we'll discuss how we actually
# pass priors in a pipeline.
# Let's use the time module to measure the phase's run-time (this includes any resumes).
start = time.time()

# Let's run the phase - its output will be written to the output/7_multinest_black_magic/ folder.

print(
    'MultiNest has begun running - check out the workspace/howtolens/chapter_2_lens_modeling/output/7_multinest_black_magic'
    ' folder for live output of the results, images and lens model.'
    ' This Jupyter notebook cell will progress once MultiNest has completed - this could take some time!'
)
phase_normal_results = phase_normal.run(data=ccd_data)
print('MultiNest has finished running - you may now continue the notebook.')

# Let's check we get a reasonably good model and fit to the data.
lens_fit_plotters.plot_fit_subplot(fit=phase_normal_results.most_likely_fit,
                                   should_plot_mask=True,
                                   extract_array_from_mask=True,
                                   zoom_around_mask=True)

print("Time without black magic = {}".format(time.time() - start))

# Now let's run the phase with black magic on, which will hopefully run a lot faster than the previous phase.

phase_black_magic = ph.LensSourcePlanePhase(
    lens_galaxies=dict(lens=gm.GalaxyModel(light=lp.EllipticalSersic,
                                           mass=mp.EllipticalIsothermal)),
    source_galaxies=dict(source=gm.GalaxyModel(light=lp.EllipticalSersic)),
    optimizer_class=nl.MultiNest,
    phase_name='7_with_black_magic')

# And herein lies the black magic. The changes to n_live_points and sampling_efficiency are part of it, but it's
# the constant efficiency mode where the real magic lies. However, let's not worry about what's happening just yet.
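# (A minimal sketch of how the black-magic phase above might be configured and timed, mirroring the
# timing pattern used for the normal phase. The n_live_points and sampling_efficiency attributes appear
# elsewhere in this script; const_efficiency_mode is an assumed attribute name for MultiNest's constant
# efficiency mode, and the values below are illustrative.)
phase_black_magic.optimizer.const_efficiency_mode = True  # assumed attribute name
phase_black_magic.optimizer.n_live_points = 60
phase_black_magic.optimizer.sampling_efficiency = 0.5

start = time.time()
phase_black_magic_results = phase_black_magic.run(data=ccd_data)
print("Time with black magic = {}".format(time.time() - start))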
# Let's go one step further. Now we know our parameter space is less complex, maybe we can find the maximum likelihood
# solution with fewer MultiNest live points and a faster sampling rate?
phase_1.optimizer.n_live_points = 30
phase_1.optimizer.sampling_efficiency = 0.9

# Let's run the phase, noting that our liberal approach to reducing the lens model complexity has reduced it to just
# 11 parameters. (The results are still preloaded for you, but feel free to run it yourself - it's fairly quick.)
print('MultiNest has begun running - check out the workspace/howtolens/chapter_2_lens_modeling/output/5_linking_phases'
      ' folder for live output of the results, images and lens model.'
      ' This Jupyter notebook cell will progress once MultiNest has completed - this could take some time!')
phase_1_results = phase_1.run(data=ccd_data)
print('MultiNest has finished running - you may now continue the notebook.')

# And indeed, we get a reasonably good model and fit to the data - in a much shorter space of time!
lens_fit_plotters.plot_fit_subplot(fit=phase_1_results.most_likely_fit, should_plot_mask=True,
                                   extract_array_from_mask=True, zoom_around_mask=True)

# Now all we need to do is look at the results of phase 1 and tune our priors in phase 2 to those results. Let's
# set up a custom phase that does exactly that.
#
# GaussianPriors are a nice way to do this. They tell the non-linear search where to look, but leave open the
# possibility that there might be a better solution nearby. In contrast, UniformPriors put hard limits on what
# values a parameter can or can't take - making it more likely we'll accidentally cut out the global likelihood solution.
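# (A minimal sketch contrasting the two prior types described above. GaussianPrior takes a mean and
# sigma, UniformPrior a lower_limit and upper_limit; the import path and the example values below are
# assumptions and may differ between PyAutoLens / autofit versions.)
from autofit.mapper import prior

einstein_radius_soft = prior.GaussianPrior(mean=1.6, sigma=0.2)                # prefers values near 1.6", but allows others
einstein_radius_hard = prior.UniformPrior(lower_limit=1.0, upper_limit=2.0)    # values outside [1.0, 2.0] are impossible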

class CustomPriorPhase(ph.LensSourcePlanePhase):

    def pass_priors(self, previous_results):

        # What I've done here is looked at the results of phase 1, and manually specified a prior for every parameter.
        # If a parameter was fixed in the previous phase, its prior is centred on the previous value. Don't worry
        # about the sigma values for now - I've chosen values that I know will ensure reasonable sampling.
        # (The explicit prior assignments for each parameter are not shown in this snippet.)
        pass

# (Note that the code above uses a number of Python's object-oriented features. In general, I expect that you'll
# simply copy the code above and use it as a template.)

# We can now create this custom phase like we did a normal phase before. When we run the phase, the pass_priors
# function will be called automatically and will change the priors as we specified above. If you look at the
# 'model.info' file in the output of the non-linear search, you'll see that the priors have indeed been changed.
custom_phase = CustomPriorPhase(
    lens_galaxies=dict(lens_galaxy=lens_galaxy_model),
    source_galaxies=dict(source_galaxy=source_galaxy_model),
    optimizer_class=non_linear.MultiNest,
    phase_name='2_custom_priors')

print(
    'MultiNest has begun running - check out the workspace/howtolens/chapter_2_lens_modeling/output/2_custom_priors'
    ' folder for live output of the results, images and lens model.'
    ' This Jupyter notebook cell will progress once MultiNest has completed - this could take some time!'
)
results_custom = custom_phase.run(data=ccd_data)
lens_fit_plotters.plot_fit_subplot(fit=results_custom.most_likely_fit)
print('MultiNest has finished running - you may now continue the notebook.')

# And, we're done. This tutorial had some pretty difficult concepts to wrap your head around. However, I can't
# emphasize enough how important it is that you develop an intuition for non-linear searches and the notion of a
# non-linear parameter space. Becoming good at lens modeling is all about being able to navigate a complex, degenerate
# and highly non-linear parameter space! Luckily, we're going to keep thinking about this in the next set of exercises,
# so if you're not feeling too confident yet, you should be in a few exercises' time!

# Before continuing to the next tutorial, I want you to think about whether anything could go wrong when we search a
# non-linear parameter space. Is it possible that we won't find the highest likelihood lens model? Why might this be?
#
# Try and list 3 reasons why this might happen. In the next tutorial, we'll learn about just that - failure!
# Example 6
#    image and source fully account for the telescope optics and effect of the PSF.

# 3) The inversion's solution is regularized. But wait, that's what we'll cover in the next tutorial!

# Finally, let me show you how easy it is to fit an image with an inversion using the fitting module. Instead of giving
# the source galaxy a light profile, we give it a pixelization and regularization, and pass it to a tracer.
source_galaxy = g.Galaxy(pixelization=pix.Rectangular(shape=(40, 40)),
                         regularization=reg.Constant(coefficients=(1.0, )))
tracer = ray_tracing.TracerImageSourcePlanes(
    lens_galaxies=[lens_galaxy],
    source_galaxies=[source_galaxy],
    image_plane_grid_stack=lens_data.grid_stack,
    border=lens_data.border)

# Then, like before, we call on the fitting module to perform the fit to the lensing image. Indeed, we see
# some pretty good looking residuals - we're certainly fitting the lensed source accurately!
fit = lens_fit.fit_lens_data_with_tracer(lens_data=lens_data, tracer=tracer)
lens_fit_plotters.plot_fit_subplot(fit=fit,
                                   should_plot_mask=True,
                                   extract_array_from_mask=True,
                                   zoom_around_mask=True)

# And, we're done, here are a few questions to get you thinking about inversions:

# 1) The inversion provides the best-fit solution to the observed image. Is there a problem with seeking the 'best-fit'?
#    Is there a risk that we're going to fit other things in the image than just the lensed source galaxy? What happens
#    if you reduce the regularization 'coefficient' above to zero? (A snippet after these questions sets this up.)

# 2) The exterior pixels in the rectangular grid have no image-pixels in them. However, they are still given a
#    reconstructed flux. If this value isn't coming from a mapping to an image-pixel, where is it coming from?
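# (If you'd like to try question 1 empirically, here is a minimal sketch that rebuilds the fit above
# with the regularization coefficient set to zero - it simply re-uses the lens_data and lens_galaxy
# objects used above; the *_no_reg variable names are just illustrative.)
source_galaxy_no_reg = g.Galaxy(pixelization=pix.Rectangular(shape=(40, 40)),
                                regularization=reg.Constant(coefficients=(0.0, )))
tracer_no_reg = ray_tracing.TracerImageSourcePlanes(
    lens_galaxies=[lens_galaxy],
    source_galaxies=[source_galaxy_no_reg],
    image_plane_grid_stack=lens_data.grid_stack,
    border=lens_data.border)
fit_no_reg = lens_fit.fit_lens_data_with_tracer(lens_data=lens_data, tracer=tracer_no_reg)
lens_fit_plotters.plot_fit_subplot(fit=fit_no_reg,
                                   should_plot_mask=True,
                                   extract_array_from_mask=True,
                                   zoom_around_mask=True)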
# Example 7
# (The opening of this phase definition is truncated in this snippet; the phase class and galaxy-model
# arguments below are illustrative placeholders.)
custom_prior_phase = ph.LensSourcePlanePhase(
    lens_galaxies=dict(lens=gm.GalaxyModel(light=lp.EllipticalSersic,
                                           mass=mp.EllipticalIsothermal)),
    source_galaxies=dict(source=gm.GalaxyModel(light=lp.EllipticalSersic)),
    optimizer_class=nl.MultiNest,
    phase_name='4_tuned_priors')
print(
    'MultiNest has begun running - check out the workspace/howtolens/chapter_2_lens_modeling/output/4_dealing_with_failure'
    ' folder for live output of the results, images and lens model.'
    ' This Jupyter notebook cell will progress once MultiNest has completed - this could take some time!'
)
)
custom_prior_result = custom_prior_phase.run(data=ccd_data)
print('MultiNest has finished running - you may now continue the notebook.')

# Bam! We get a good model. The right model. A glorious model! We gave our non-linear search a helping hand, and it
# repaid us in spades!

# Check out the PDF in the '/howtolens/chapter_2_lens_modeling/output/4_custom_priors/image' folder - what degeneracies
# do you notice between parameters?
lens_fit_plotters.plot_fit_subplot(fit=custom_prior_result.most_likely_fit)

# Okay, so we've learnt that by tuning our priors to the lens we're fitting, we can increase our chance of inferring a
# good lens model. Before moving on to the next approach, let's think about the advantages and disadvantages of prior
# tuning:

# Advantage - We found the maximum likelihood solution in parameter space.
# Advantage - The phase took less time to run, because the non-linear search explored less of parameter space.
# Disadvantage - If we specified one prior incorrectly, the non-linear search would have begun and therefore ended at
#                an incorrect solution.
# Disadvantage - Our phase was tailored to this specific strong lens. If we want to fit a large sample of lenses, we'd
#                have to write a custom phase for every single one - this would take up a lot of our time!

### Approach 2 -  Reducing Complexity ###

# Our non-linear search failed because we made the lens model more realistic and therefore more complex. Maybe we
# can make it less complex, whilst keeping it realistic enough to give a good fit?

def perform_fit_with_source_galaxy(source_galaxy):
    lens_galaxy = g.Galaxy(mass=mp.EllipticalIsothermal(
        centre=(0.0, 0.0), axis_ratio=0.8, phi=135.0, einstein_radius=1.6))
    tracer = ray_tracing.TracerImageSourcePlanes(
        lens_galaxies=[lens_galaxy],
        source_galaxies=[source_galaxy],
        image_plane_grid_stack=lens_data.grid_stack,
        border=lens_data.border)
    return lens_fit.fit_lens_data_with_tracer(lens_data=lens_data,
                                              tracer=tracer)


# Okay, so let's look at our fit from the previous tutorial in more detail. We'll use a higher resolution 40 x 40 grid.
source_galaxy = g.Galaxy(pixelization=pix.Rectangular(shape=(40, 40)),
                         regularization=reg.Constant(coefficients=(1.0, )))
fit = perform_fit_with_source_galaxy(source_galaxy=source_galaxy)
lens_fit_plotters.plot_fit_subplot(fit=fit)

# It still looks pretty good! However, this is because I sneakily chose a regularization coefficient that gives a
# good looking solution, without telling you. If we reduce this regularization coefficient to zero, our source
# reconstruction goes extremely weird.
source_galaxy = g.Galaxy(pixelization=pix.Rectangular(shape=(40, 40)),
                         regularization=reg.Constant(coefficients=(0.0, )))
no_regularization_fit = perform_fit_with_source_galaxy(
    source_galaxy=source_galaxy)
lens_fit_plotters.plot_fit_subplot(fit=no_regularization_fit,
                                   should_plot_mask=True,
                                   extract_array_from_mask=True,
                                   zoom_around_mask=True)

# So, what's happening here, and why does removing regularization do this to our source reconstruction? When our inversion
# reconstructs a source, it doesn't *just* compute the set of fluxes that best-fit the image. It is also 'regularized',