def test__fit_interferometer_generator_from_aggregator(
    interferometer_7, visibilities_mask_7, mask_7x7, samples
):
    """The al.agg.FitInterferometer generator must rebuild fits whose dataset
    visibilities and real-space mask match what the phase was run with."""
    phase = al.PhaseInterferometer(
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        search=mock.MockSearch("test_phase_aggregator", samples=samples),
        real_space_mask=mask_7x7,
    )

    phase.run(
        dataset=interferometer_7,
        mask=visibilities_mask_7,
        results=mock.MockResults(samples=samples),
    )

    aggregator = af.Aggregator(directory=phase.paths.output_path)

    # Every fit loaded back must carry the original dataset and mask.
    for fit in al.agg.FitInterferometer(aggregator=aggregator):
        masked_dataset = fit.masked_interferometer
        assert (
            masked_dataset.interferometer.visibilities
            == interferometer_7.visibilities
        ).all()
        assert (masked_dataset.real_space_mask == mask_7x7).all()
def test__masked_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    """The al.agg.MaskedImaging generator must recreate masked imaging with the
    grid / inversion-grid settings the phase was configured with."""
    # Settings are built separately so the phase construction stays readable.
    phase_settings = al.SettingsPhaseImaging(
        settings_masked_imaging=al.SettingsMaskedImaging(
            grid_class=al.GridIterate,
            grid_inversion_class=al.GridInterpolate,
            fractional_accuracy=0.5,
            sub_steps=[2],
            pixel_scales_interp=0.1,
        )
    )

    phase = al.PhaseImaging(
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        settings=phase_settings,
        search=mock.MockSearch("test_phase_aggregator", samples=samples),
    )

    phase.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )

    aggregator = af.Aggregator(directory=phase.paths.output_path)

    for masked_imaging in al.agg.MaskedImaging(aggregator=aggregator):
        assert (masked_imaging.imaging.image == imaging_7x7.image).all()
        assert isinstance(masked_imaging.grid, al.GridIterate)
        assert isinstance(masked_imaging.grid_inversion, al.GridInterpolate)
        assert masked_imaging.grid.sub_steps == [2]
        assert masked_imaging.grid.fractional_accuracy == 0.5
        assert masked_imaging.grid_inversion.pixel_scales_interp == (0.1, 0.1)
def test_float_inequality(session):
    """Querying `sigma < 3` must match exactly the single stored fit whose
    float sigma is below 3, not the two above it."""
    aggregator = af.Aggregator(session)

    sigmas = (
        0.9992237362814176,
        4.9687212446221904,
        9.967065800134504,
    )
    for sigma in sigmas:
        fit = af.db.Fit(
            id=str(uuid4()),
            instance={"gaussian": Gaussian(sigma=sigma)},
        )
        session.add(fit)
        session.commit()

    assert len(aggregator) == 3

    below_three = aggregator.query(aggregator.gaussian.sigma < 3)
    assert len(below_three) == 1
Esempio n. 4
0
def test_numpy_values(session):
    """A numpy array stored on a Fit must round-trip through aggregator.values."""
    aggregator = af.Aggregator(session)

    stored = np.zeros((10, 10))
    fit = af.db.Fit(id=str(uuid4()))
    fit["data"] = stored
    session.add(fit)
    session.commit()

    loaded = aggregator.values("data")[0]
    assert (loaded == stored).all()
Esempio n. 5
0
def test_incomplete(grid_search, session):
    """A scraped grid search that never completed must be excluded when the
    aggregator is filtered on `search.is_complete`."""
    grid_search.save_metadata()
    Scraper(directory=output_directory, session=session).scrape()
    session.commit()

    aggregator = af.Aggregator(session)
    complete_only = aggregator(aggregator.search.is_complete)
    assert len(complete_only) == 0
Esempio n. 6
0
def read_in(session):
    """Fixture: load every model from the `rjlens` aggregator output into the
    session as db.Object rows, then delete the database file on teardown."""
    rjlens_directory = directory.parent.parent.parent.parent / "rjlens"
    for item in af.Aggregator(rjlens_directory):
        session.add(db.Object.from_object(item.model))
    yield
    # Teardown: the database file may never have been created; that is fine.
    try:
        os.remove(database_path)
    except FileNotFoundError:
        pass
Esempio n. 7
0
def make_path_aggregator(session):
    """Fixture: two fits (one complete, one incomplete) flushed to the session,
    returned wrapped in an Aggregator."""
    complete = af.db.Fit(id="complete", is_complete=True)
    incomplete = af.db.Fit(id="incomplete", is_complete=False)
    all_fits = [complete, incomplete]
    for index, fit in enumerate(all_fits):
        fit["dataset"] = {"name": "dataset"}
        fit["pipeline"] = f"pipeline{index}"
    session.add_all(all_fits)
    session.flush()
    return af.Aggregator(session)
Esempio n. 8
0
def test_scrape(grid_search, parent_search, model_gaussian_x1, session):
    """Scraping the output directory must recover the grid search, link it to
    its parent search, and expose values and completed grid searches."""
    grid_search.fit(
        model=model_gaussian_x1,
        analysis=af.m.MockAnalysis(),
        parent=parent_search,
        grid_priors=[model_gaussian_x1.centre],
    )
    parent_search.fit(model=model_gaussian_x1, analysis=af.m.MockAnalysis())
    parent_search.paths.save_all()

    Scraper(directory=output_directory, session=session).scrape()

    aggregator = af.Aggregator(session)
    scraped = list(
        aggregator.query(aggregator.search.id == grid_search.paths.identifier)
    )
    assert scraped[0].parent.id == parent_search.paths.identifier
    assert len(aggregator.values("max_log_likelihood")) > 0
    assert list(aggregator.grid_searches())[0].is_complete
def test__masked_interferometer_generator_from_aggregator(
    interferometer_7, visibilities_mask_7, mask_7x7, samples
):
    """The al.agg.MaskedInterferometer generator must recreate datasets with the
    transformer, grid and inversion-grid settings the phase was configured with."""
    # Settings are built separately so the phase construction stays readable.
    phase_settings = al.SettingsPhaseInterferometer(
        settings_masked_interferometer=al.SettingsMaskedInterferometer(
            transformer_class=al.TransformerDFT,
            grid_class=al.GridIterate,
            grid_inversion_class=al.GridInterpolate,
            fractional_accuracy=0.5,
            sub_steps=[2],
            pixel_scales_interp=0.1,
        )
    )

    phase = al.PhaseInterferometer(
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        settings=phase_settings,
        search=mock.MockSearch("test_phase_aggregator", samples=samples),
        real_space_mask=mask_7x7,
    )

    phase.run(
        dataset=interferometer_7,
        mask=visibilities_mask_7,
        results=mock.MockResults(samples=samples),
    )

    aggregator = af.Aggregator(directory=phase.paths.output_path)

    for masked_interferometer in al.agg.MaskedInterferometer(aggregator=aggregator):
        assert (
            masked_interferometer.interferometer.visibilities
            == interferometer_7.visibilities
        ).all()
        assert (masked_interferometer.real_space_mask == mask_7x7).all()
        assert isinstance(masked_interferometer.grid, al.GridIterate)
        assert isinstance(masked_interferometer.grid_inversion, al.GridInterpolate)
        assert masked_interferometer.grid.sub_steps == [2]
        assert masked_interferometer.grid.fractional_accuracy == 0.5
        assert masked_interferometer.grid_inversion.pixel_scales_interp == (0.1, 0.1)
        assert isinstance(masked_interferometer.transformer, al.TransformerDFT)
def test__fit_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    """The al.agg.FitImaging generator must rebuild fits against the original
    imaging data the phase was run on."""
    phase = al.PhaseImaging(
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        search=mock.MockSearch("test_phase_aggregator", samples=samples),
    )

    phase.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )

    aggregator = af.Aggregator(directory=phase.paths.output_path)

    for fit in al.agg.FitImaging(aggregator=aggregator):
        assert (fit.masked_imaging.imaging.image == imaging_7x7.image).all()
Esempio n. 11
0
def test__masked_imaging_generator_from_aggregator(imaging_7x7, mask_7x7):
    """Older-API variant: al.agg.MaskedImaging must recreate the imaging a
    phase (driven by the mock non-linear optimizer) was run on."""
    phase = al.PhaseImaging(
        non_linear_class=mock_pipeline.MockNLO,
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        phase_name="test_phase_aggregator",
    )

    phase.run(
        dataset=imaging_7x7,
        mask=mask_7x7,
        results=mock_pipeline.MockResults(),
    )

    aggregator = af.Aggregator(directory=phase.paths.phase_output_path)

    for masked_imaging in al.agg.MaskedImaging(aggregator=aggregator):
        assert (masked_imaging.imaging.image == imaging_7x7.image).all()
def test__tracer_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    """The al.agg.Tracer generator must reconstruct tracers whose galaxies carry
    the redshifts and light-profile centre of the fitted model."""
    phase = al.PhaseImaging(
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        search=mock.MockSearch("test_phase_aggregator", samples=samples),
    )

    phase.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )

    aggregator = af.Aggregator(directory=phase.paths.output_path)

    for tracer in al.agg.Tracer(aggregator=aggregator):
        lens, source = tracer.galaxies[0], tracer.galaxies[1]
        assert lens.redshift == 0.5
        assert lens.light.centre == (0.0, 1.0)
        assert source.redshift == 1.0
Esempio n. 13
0
def test__tracer_generator_from_aggregator(imaging_7x7, mask_7x7):
    """Older-API variant: al.agg.Tracer must reconstruct tracers whose galaxies
    carry the redshifts and light-profile centre of the fitted model."""
    phase = al.PhaseImaging(
        non_linear_class=mock_pipeline.MockNLO,
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        phase_name="test_phase_aggregator",
    )

    phase.run(
        dataset=imaging_7x7,
        mask=mask_7x7,
        results=mock_pipeline.MockResults(),
    )

    aggregator = af.Aggregator(directory=phase.paths.phase_output_path)

    for tracer in al.agg.Tracer(aggregator=aggregator):
        lens, source = tracer.galaxies[0], tracer.galaxies[1]
        assert lens.redshift == 0.5
        assert lens.light.centre == (0.0, 1.0)
        assert source.redshift == 1.0
# Pipeline identifier used to filter aggregator results below; the two pipeline
# names are joined with " + " as a single composite key.
pipeline_meta = "pipeline_initialize__x1_gaussian + pipeline_main__x1_gaussian"


# First, we create an instance of the aggregator, which takes the output path as input, telling it where to load
# results from.

### FEATURE - it would be good if this printed summary information of the pipelines / phases found. For example:

### Aggregator has found the results of fits in the following 3 folders:

### Folder 1 - gaussian_x1__x3_fits/gaussian_x1__sigma_0.1
### Folder 2 - gaussian_x1__x3_fits/gaussian_x1__sigma_0.5
### Folder 3 - gaussian_x1__x3_fits/gaussian_x1__sigma_1.0

# NOTE(review): output_path and output_folder are assumed to be defined earlier
# in this script — confirm before running this fragment standalone.
aggregator = af.Aggregator(directory=output_path + output_folder)

# We can create a list of the optimizer classes of every phase. When you make a phase in a pipeline, you are
# probably used to seeing the following line:

# optimizer_class=af.MultiNest

# This line is telling the Phase which non-linear optimizer to use, in this case MultiNest. Every optimizer is created
# as an instance of a class in Python, which has methods built-in allowing one to manipulate its results.


#### Bug 2 - following the refactor the MultiNest class and MultiNest output classes are separate. We want to use the
#### output class to manipulate results in this script. Below, I have to manually set up the output class, which is simple
#### but makes the aggregator API a bit clunky. Can we have a method 'aggregator.non_linear_output'?

optimizers = aggregator.optimizers_with(pipeline=pipeline_meta, phase=phase_name)
Esempio n. 15
0
def make_aggregator():
    """Fixture: yield an Aggregator over aggregator_directory, then remove its
    three phase sub-folders on teardown."""
    yield af.Aggregator(aggregator_directory)
    for subfolder in ("one", "two", "three"):
        rmtree(f"{aggregator_directory}/{subfolder}")
Esempio n. 16
0
def make_aggregator():
    """Fixture: a fresh Aggregator pointed at aggregator_directory."""
    aggregator = af.Aggregator(aggregator_directory)
    return aggregator
Esempio n. 17
0
individually using the same phase, producing a large set of results on your hard disk corresponding to the full sample.
That will be a lot of paths and directories to navigate! At some point, there`ll be too many results for it to be
a sensible use of your time to analyse the results by sifting through the outputs on your hard disk.

PyAutoFit`s aggregator tool allows us to load results in a Python script or, more importantly, a Jupyter notebook.
All we have to do is point the aggregator to the output directory from which we want to load results, which in this
case will be the results of the first `NonLinearSearch` of this chapter.
"""

# %%
"""
To set up the aggregator we simply pass it the folder of the results we want to load.
"""

# %%
# Point the aggregator at the top-level "output" folder, then narrow it to the
# results of tutorial 1's non-linear search phase.
agg = af.Aggregator(directory="output")
agg = agg.filter(agg.phase == "phase_t1_non_linear_search")

# %%
"""
We get the output of the results of the model-fit performed in tutorial 1, given that is the directory we point to. 
This gives us a list with 1 entry, the list would have more entries if there were more results in the path.
"""

# %%
# Materialize the generator of Samples objects into a list (one entry per fit).
samples = list(agg.values("samples"))

# %%
"""
From here, we can inspect results as we please, for example printing the maximum log likelihood model of the phase.
"""
Esempio n. 18
0
#!/usr/bin/env python
from sys import argv

import autofit as af

if __name__ == "__main__":
    # Build an in-memory sqlite database, ingest the directory named on the
    # command line, and show the resulting aggregator.
    database = af.db.open_database("sqlite://")
    aggregator = af.Aggregator(database)
    aggregator.add_directory(argv[1])
    print(aggregator)
import autolens as al
import autolens.plot as aplt

# In this tutorial, we use the aggregator to load models and data from a non-linear search and use them to reperform
# fits to the data.

# Below, we set up the aggregator as we did in the previous tutorial.
# Resolve paths relative to this script's location.
# NOTE(review): relies on `os` and `af` being imported earlier in the file —
# confirm before running this fragment standalone.
workspace_path = "{}/../../".format(os.path.dirname(
    os.path.realpath(__file__)))
output_path = workspace_path + "output"
aggregator_results_path = output_path + "/aggregator_sample_beginner"

# Point autofit's config at the workspace config folder and output directory.
af.conf.instance = af.conf.Config(config_path=str(workspace_path + "/config"),
                                  output_path=str(output_path))

aggregator = af.Aggregator(directory=str(aggregator_results_path))

# Again, we create a list of the MultiNestOutputs of each phase.
pipeline_name = "pipeline__lens_sie__source_inversion"
phase_name = "phase_3__source_inversion"

outputs = aggregator.filter(phase=phase_name).output

# We can also use the aggregator to load the dataset of every lens our pipeline fitted. This returns the dataset
# as the "Imaging" objects we passed to the pipeline when we ran them.

datasets = aggregator.filter(phase=phase_name).dataset
print("Datasets:")
print(datasets, "\n")

# Lets plot each dataset's subplot.
Esempio n. 20
0
def test_aggregate_completed(session):
    """Filtering on search.is_complete must retain exactly one grid search."""
    session.commit()
    aggregator = af.Aggregator(session)
    complete_only = aggregator(aggregator.search.is_complete)
    assert len(complete_only.grid_searches()) == 1
Esempio n. 21
0
def test_aggregate(session):
    """The aggregator must expose one grid search with at least one child fit."""
    aggregator = af.Aggregator(session)

    grid_searches = aggregator.grid_searches()
    assert len(grid_searches) == 1
    assert len(grid_searches.children()) > 0
        f"Emcee has begun running, checkout \n"
        f"autofit_workspace/output/howtofit/chapter_graphica_models/tutorial_1_global_model/{dataset_name} for live \n"
        f"output of the results. This Jupyter notebook cell with progress once Emcee has completed, this could take a \n"
        f"few minutes!")

    emcee.fit(model=model, analysis=analysis)
"""
Checkout the output folder, you should see three new sets of results corresponding to our 3 `Gaussian` datasets.

In the `model.results` file of each fit, it will be clear that the `centre` value of every fit (and the other 
parameters) have much larger errors than other **PyAutoFit** examples due to the low signal to noise of the data.
We now load the results of all 3 model-fits using the `Aggregator`, so we can try to determine the global `centre` value.
"""
# Load every model-fit written under output/howtofit/chapter_graphical_models/
# tutorial_1_global_model.
agg = af.Aggregator(
    directory=path.join("output", "howtofit", "chapter_graphical_models",
                        "tutorial_1_global_model"))
"""
The aggregator allows us to plot the median PDF value and 3.0 confidence intervals of the `centre` estimate from
the model-fit to each dataset.
"""
import matplotlib.pyplot as plt

# Median-PDF instance and upper 3-sigma error instance of every fit's samples.
mp_instances = [samps.median_pdf_instance for samps in agg.values("samples")]
ue3_instances = [
    samp.error_instance_at_upper_sigma(sigma=3.0)
    for samp in agg.values("samples")
]
le3_instances = [
    samp.error_instance_at_lower_sigma(sigma=3.0)
    for samp in agg.values("samples")
Esempio n. 23
0
print("Emcee has finished run - you may now continue the notebook.")

# %%
"""
Checkout the output folder - you should see three new sets of results corresponding to our 3 Gaussian datasets.
Unlike previous tutorials, these folders in the output folder are named after the dataset and contain the folder
with the phase's name, as opposed to just the phase-name folder.

To load these results with the aggregator, we simply point it to the path of the results we want it to inspect.
"""

# %%
# Point the aggregator at the howtofit output folder.
output_path = f"{workspace_path}/output/howtofit"

agg = af.Aggregator(directory=str(output_path))

# %%
"""
To begin, let me quickly explain what a generator is in Python, for those unaware. A generator is an object that 
iterates over a function when it is called. The aggregator creates all objects as generators, rather than lists, or 
dictionaries, or whatever.

Why? Because lists and dictionaries store every entry in memory simultaneously. If you fit many datasets, you'll have 
lots of results and therefore use a lot of memory. This will crash your laptop! On the other hand, a generator only 
stores the object in memory when it runs the function; it is free to overwrite it afterwards. Thus, your laptop won't 
crash!

There are two things to bear in mind with generators:

 1) A generator has no length, thus to determine how many entries of data it corresponds to you first must turn it to a 
Esempio n. 24
0
# Now we'll use this path to explicitly set the config path and output path.
af.conf.instance = af.conf.Config(
    config_path=str(workspace_path / "config"), output_path=str(output_path)
)

# To use the aggregator we have to supply it with the pipeline name we want to load results from and the phase name
# of that pipeline we want to load results from. Lets load the results of the pipeline's final phase.

output_folder = "gaussian_x1__x3_fits/"
pipeline_name = "pipeline_main__x1_gaussian"
phase_name = "phase_1__x1_gaussian_final"

# First, we create an instance of the aggregator, which takes the output path as input, telling it where to load
# results from.

aggregator = af.Aggregator(directory=str(output_path / output_folder))

# We can create a list of the optimizer classes of every phase. When you make a phase in a pipeline, you are
# probably used to seeing the following line:

# optimizer_class=af.MultiNest

# This line is telling the Phase which non-linear optimizer to use, in this case MultiNest. Every optimizer is created
# as an instance of a class in Python, which has methods built-in allowing one to manipulate its results.

non_linear_outputs = aggregator.filter(pipeline=pipeline_name, phase=phase_name).output

# First, if we print the non_linear_outputs we'll see that we have a Python list of two non_linear_outputs. These are the non_linear_outputs
# of phase 1 of our main pipeline for the image we modeled.

# If we had modeled 100 images, we'd have 100 non_linear_outputs. Clearly, with that in mind, for the code below you should