def test__samples_from_model(self):
    """LBFGS search: the single sample loaded from disk matches the reference values."""
    search = af.LBFGS()
    search.paths = af.DirectoryPaths(path_prefix=path.join("non_linear", "LBFGS"))
    search.paths._identifier = "tag"

    model = af.ModelMapper(mock_class=af.m.MockClassx3)
    model.mock_class.one = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)
    model.mock_class.three = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)

    samples = search.samples_from(model=model)

    list_attributes = (
        samples.parameter_lists,
        samples.log_likelihood_list,
        samples.log_prior_list,
        samples.log_posterior_list,
    )
    assert all(isinstance(attribute, list) for attribute in list_attributes)
    assert isinstance(samples.parameter_lists[0], list)

    assert samples.parameter_lists[0] == pytest.approx(
        [50.005469, 25.143677, 10.06950], 1.0e-4
    )
    assert samples.log_likelihood_list[0] == pytest.approx(-45.134121, 1.0e-4)
    assert samples.log_posterior_list[0] == pytest.approx(-44.97504284, 1.0e-4)
    assert samples.weight_list[0] == 1.0

    # LBFGS is an optimizer, so only one (the final) sample is stored.
    assert len(samples.parameter_lists) == 1
    assert len(samples.log_likelihood_list) == 1
def test__samples_from_model(self):
    """PySwarmsGlobal search: samples loaded from disk match the reference values."""
    pyswarms = af.PySwarmsGlobal()
    # Point the search at pre-generated output identified by "tag".
    pyswarms.paths = af.DirectoryPaths(
        path_prefix=path.join("non_linear", "pyswarms"))
    pyswarms.paths._identifier = "tag"
    model = af.ModelMapper(mock_class=af.m.MockClassx3)
    model.mock_class.one = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)
    model.mock_class.three = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)
    # model.mock_class.four = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)
    samples = pyswarms.samples_from(model=model)
    assert isinstance(samples.parameter_lists, list)
    assert isinstance(samples.parameter_lists[0], list)
    assert isinstance(samples.log_likelihood_list, list)
    assert isinstance(samples.log_prior_list, list)
    assert isinstance(samples.log_posterior_list, list)
    # Reference values for the first sample of the stored output.
    assert samples.parameter_lists[0] == pytest.approx(
        [50.1254, 1.04626, 10.09456], 1.0e-4)
    assert samples.log_likelihood_list[0] == pytest.approx(
        -5071.80777, 1.0e-4)
    assert samples.log_posterior_list[0] == pytest.approx(
        -5070.73298, 1.0e-4)
    assert samples.weight_list[0] == 1.0
    # The stored output contains 500 samples.
    assert len(samples.parameter_lists) == 500
    assert len(samples.log_likelihood_list) == 500
def test__log_prior_from_value(self):
    """
    Check `log_prior_from_value` of `LogUniformPrior`.

    NOTE(review): both priors below assert identical results (1.0, 0.5, 0.25)
    despite different limits, implying log_prior_from_value depends only on the
    value (consistent with 1/value) and not on the limits -- confirm intended.
    """
    gaussian_simple = af.LogUniformPrior(lower_limit=1e-8, upper_limit=1.0)
    log_prior = gaussian_simple.log_prior_from_value(value=1.0)
    assert log_prior == 1.0
    log_prior = gaussian_simple.log_prior_from_value(value=2.0)
    assert log_prior == 0.5
    log_prior = gaussian_simple.log_prior_from_value(value=4.0)
    assert log_prior == 0.25
    gaussian_simple = af.LogUniformPrior(lower_limit=50.0, upper_limit=100.0)
    log_prior = gaussian_simple.log_prior_from_value(value=1.0)
    assert log_prior == 1.0
    log_prior = gaussian_simple.log_prior_from_value(value=2.0)
    assert log_prior == 0.5
    log_prior = gaussian_simple.log_prior_from_value(value=4.0)
    assert log_prior == 0.25
def test__samples_from_model(self):
    """Drawer search: the three samples loaded from disk match the reference values."""
    drawer = af.Drawer()
    drawer.paths = af.DirectoryPaths(path_prefix=path.join("non_linear", "drawer"))
    drawer.paths._identifier = "tag"

    model = af.ModelMapper(mock_class=af.m.MockClassx3)
    model.mock_class.one = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)
    model.mock_class.three = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0)

    samples = drawer.samples_from(model=model)

    list_attributes = (
        samples.parameter_lists,
        samples.log_likelihood_list,
        samples.log_prior_list,
        samples.log_posterior_list,
    )
    assert all(isinstance(attribute, list) for attribute in list_attributes)
    assert isinstance(samples.parameter_lists[0], list)

    assert samples.parameter_lists[0] == pytest.approx(
        [49.507679, 49.177471, 14.76753], 1.0e-4
    )
    assert samples.log_likelihood_list[0] == pytest.approx(-2763.925766, 1.0e-4)
    assert samples.log_posterior_list[0] == pytest.approx(-2763.817517, 1.0e-4)
    assert samples.weight_list[0] == 1.0

    assert len(samples.parameter_lists) == 3
    assert len(samples.log_likelihood_list) == 3
def test__lower_limit_zero_or_below_raises_error(self):
    """A log-uniform prior is undefined at or below zero, so construction must raise."""
    for bad_lower_limit in (-1.0, 0.0):
        with pytest.raises(exc.PriorException):
            af.LogUniformPrior(lower_limit=bad_lower_limit, upper_limit=1.0)
def test__samples_from_model(self):
    """Emcee search: samples, priors, weights and auto-correlations match the stored output."""
    emcee = af.Emcee()
    emcee.paths = af.DirectoryPaths(
        path_prefix=path.join("non_linear", "emcee"))
    emcee.paths._identifier = "tag"
    model = af.ModelMapper(mock_class=af.m.MockClassx4)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=10.0)
    samples = emcee.samples_from(model=model)
    assert isinstance(samples.parameter_lists, list)
    assert isinstance(samples.parameter_lists[0], list)
    assert isinstance(samples.log_likelihood_list, list)
    assert isinstance(samples.log_prior_list, list)
    assert isinstance(samples.log_posterior_list, list)
    assert isinstance(samples.weight_list, list)
    # Reference values from the pre-generated emcee backend output.
    assert samples.parameter_lists[0] == pytest.approx(
        [0.173670, 0.162607, 3095.28, 0.62104], 1.0e-4)
    assert samples.log_likelihood_list[0] == pytest.approx(
        -17257775239.32677, 1.0e-4)
    assert samples.log_prior_list[0] == pytest.approx(
        1.6102016075510708, 1.0e-4)
    assert samples.weight_list[0] == pytest.approx(1.0, 1.0e-4)
    # 1000 steps across 10 walkers.
    assert samples.total_steps == 1000
    assert samples.total_walkers == 10
    assert samples.auto_correlations.times[0] == pytest.approx(
        31.98507, 1.0e-4)
def test__non_zero_lower_limit(self):
    """unit=0 and unit=1 map to the limits; unit=0.5 maps between them logarithmically."""
    prior = af.LogUniformPrior(lower_limit=0.5, upper_limit=1.0)

    assert prior.value_for(0.0) == 0.5
    assert prior.value_for(1.0) == 1.0
    # Midpoint in log space: sqrt(0.5 * 1.0) ~ 0.70710678.
    assert prior.value_for(0.5) == pytest.approx(0.70710678118, 1.0e-4)
def test__simple_assumptions(self):
    """value_for maps the unit interval logarithmically onto [1e-8, 1.0]."""
    prior = af.LogUniformPrior(lower_limit=1.0e-8, upper_limit=1.0)

    assert prior.value_for(0.0) == 1.0e-8
    assert prior.value_for(1.0) == 1.0
    # Midpoint in log space of 1e-8 and 1.0 is 1e-4.
    assert prior.value_for(0.5) == 0.0001
def update_mass_to_light_ratio_prior(
    model: af.Model(al.lmp.LightMassProfile),
    result: af.Result,
    einstein_mass_range: Tuple[float, float],
    bins: int = 100,
) -> Optional[af.Model]:
    """
    Update the mass-to-light ratio parameter of a `LightMassProfile` model (e.g. a bulge
    or disk) such that the `LogUniformPrior` on the mass-to-light ratio of the
    model-component is set with lower and upper limits that are a multiple of the
    Einstein mass computed in the previous SOURCE PIPELINE.

    For example, if `einstein_mass_range=[0.01, 5.0]` the mass-to-light ratio will use
    priors corresponding to values which give Einstein masses 1% and 500% of the
    estimated Einstein mass.

    Parameters
    ----------
    model
        The light and mass profile whose priors are passed from the LIGHT PIPELINE.
    result
        The result of the LIGHT PIPELINE used to pass the priors.
    einstein_mass_range
        The values the estimate of the Einstein Mass in the LIGHT PIPELINE is multiplied
        by to set the lower and upper limits of the profile's mass-to-light ratio.
    bins
        The number of bins used to map a calculated Einstein Mass to that of the
        `LightMassProfile`.

    Returns
    -------
    af.Model(mp.LightMassProfile)
        The light and mass profile whose mass-to-light ratio prior is set using the
        input Einstein mass and range.
    """
    if model is None:
        return None
    grid = result.max_log_likelihood_fit.grid
    einstein_radius = result.max_log_likelihood_tracer.einstein_radius_from(grid=grid)
    einstein_mass = result.max_log_likelihood_tracer.einstein_mass_angular_from(
        grid=grid
    )
    # Scale the measured Einstein mass by the requested range to bracket the prior.
    einstein_mass_lower = einstein_mass_range[0] * einstein_mass
    einstein_mass_upper = einstein_mass_range[1] * einstein_mass
    instance = model.instance_from_prior_medians()
    # Convert each bracketing Einstein mass to the corresponding normalization.
    mass_to_light_ratio_lower = instance.normalization_via_mass_angular_from(
        mass_angular=einstein_mass_lower, radius=einstein_radius, bins=bins
    )
    mass_to_light_ratio_upper = instance.normalization_via_mass_angular_from(
        mass_angular=einstein_mass_upper, radius=einstein_radius, bins=bins
    )
    model.mass_to_light_ratio = af.LogUniformPrior(
        lower_limit=mass_to_light_ratio_lower, upper_limit=mass_to_light_ratio_upper
    )
    return model
def test_change_class():
    """Swapping a prior's class (Uniform -> LogUniform) must change the identifier."""
    uniform_model = af.Model(
        af.Gaussian,
        normalization=af.UniformPrior(lower_limit=1e-6, upper_limit=1e6),
    )
    log_uniform_model = af.Model(
        af.Gaussian,
        normalization=af.LogUniformPrior(lower_limit=1e-6, upper_limit=1e6),
    )
    assert Identifier(uniform_model) != Identifier(log_uniform_model)
def test_log_prior_list_from_vector(self):
    """
    log_prior_list_from_vector maps a physical vector to per-parameter log priors.

    NOTE(review): the expected values are consistent with GaussianPrior(mean=1,
    sigma=2) at 0.0 giving (0-1)^2 / (2 * 2^2) = 0.125 and LogUniformPrior at
    5.0 giving 1/5 = 0.2 -- confirm against the prior implementations.
    """
    mapper = af.ModelMapper()
    mapper.mock_class = af.PriorModel(af.m.MockClassx2)
    mapper.mock_class.one = af.GaussianPrior(mean=1.0, sigma=2.0)
    mapper.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=10.0)
    log_prior_list = mapper.log_prior_list_from_vector(vector=[0.0, 5.0])
    assert log_prior_list == [0.125, 0.2]
def test__samples_from_model(self):
    """Pickle a mock Dynesty sampler to disk and check samples_from parses it correctly."""
    # Setup pickle of mock Dynesty sampler that the samples_from_model function uses.
    results = MockDynestyResults(
        samples=np.array([[1.0, 2.0, 3.0, 5.0], [1.0, 2.0, 3.0, 4.0],
                          [1.0, 2.0, 3.0, 4.0]]),
        logl=[1.0, 2.0, 3.0],
        logwt=[np.log(1.0), np.log(2.0), np.log(3.0)],
        ncall=[5.0, 5.0],
        logz=[-2.0, -1.0, 0.0],
        nlive=3,
    )
    sampler = MockDynestySampler(results=results)
    paths = af.DirectoryPaths(
        path_prefix=path.join("non_linear", "dynesty"))
    paths._identifier = "tag"
    dynesty = af.DynestyStatic(nlive=3)
    dynesty.paths = paths
    # samples_from loads the sampler back from this pickle file.
    with open(path.join(dynesty.paths.samples_path, "dynesty.pickle"),
              "wb") as f:
        dill.dump(sampler, f)
    model = af.ModelMapper(mock_class=mock.MockClassx4)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=10.0)
    samples = dynesty.samples_from(model=model)
    assert isinstance(samples.parameter_lists, list)
    assert isinstance(samples.parameter_lists[0], list)
    assert isinstance(samples.log_likelihood_list, list)
    assert isinstance(samples.log_prior_list, list)
    assert isinstance(samples.log_posterior_list, list)
    assert isinstance(samples.weight_list, list)
    # Parameter lists mirror the mock results' samples array.
    assert samples.parameter_lists == [
        [1.0, 2.0, 3.0, 5.0],
        [1.0, 2.0, 3.0, 4.0],
        [1.0, 2.0, 3.0, 4.0],
    ]
    assert samples.log_likelihood_list == [1.0, 2.0, 3.0]
    assert samples.log_prior_list == [0.2, 0.25, 0.25]
    # logwt values log(1), log(2), log(3) convert back to weights 1, 2, 3.
    assert samples.weight_list == pytest.approx([1.0, 2.0, 3.0], 1.0e-4)
    # NOTE(review): 10 presumably sums the mock ncall entries (5 + 5) -- confirm.
    assert samples.total_samples == 10
    assert samples.log_evidence == 0.0
    assert samples.number_live_points == 3
def test__median_pdf_parameters(self):
    """Median PDF vector recovered from the emcee output matches reference values."""
    emcee = af.Emcee()
    emcee.paths = af.DirectoryPaths(path_prefix=path.join("non_linear", "emcee"))
    emcee.paths._identifier = "tag"

    model = af.ModelMapper(mock_class=af.m.MockClassx4)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=10.0)

    samples = emcee.samples_from(model=model)

    expected_median_pdf = [0.008422, -0.026413, 9.9579656, 0.494618]
    assert samples.median_pdf_vector == pytest.approx(expected_median_pdf, 1.0e-3)
def make_pipeline(name, folders, search=None):
    """
    Build a grid-search pipeline fitting a lens + subhalo + source model.

    Parameters
    ----------
    name
        The pipeline name passed to `al.PipelineDataset`.
    folders
        Output folders for the phase.
    search
        The non-linear search used by the phase. Defaults to a fresh
        `af.DynestyStatic()` per call. (The original signature used
        `search=af.DynestyStatic()`, a mutable default argument evaluated once
        at definition time and shared across every call -- fixed here with a
        None sentinel.)
    """
    if search is None:
        search = af.DynestyStatic()

    # Lens: isothermal mass profile with tightly bounded centre and radius.
    lens = al.GalaxyModel(redshift=0.5, mass=al.mp.EllipticalIsothermal)
    lens.mass.centre_0 = af.UniformPrior(lower_limit=-0.01, upper_limit=0.01)
    lens.mass.centre_1 = af.UniformPrior(lower_limit=-0.01, upper_limit=0.01)
    lens.mass.einstein_radius = af.UniformPrior(lower_limit=1.55, upper_limit=1.65)

    # Source: Sersic light profile with tightly bounded parameters.
    source = al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic)
    source.light.centre_0 = af.UniformPrior(lower_limit=-0.01, upper_limit=0.01)
    source.light.centre_1 = af.UniformPrior(lower_limit=-0.01, upper_limit=0.01)
    source.light.intensity = af.UniformPrior(lower_limit=0.35, upper_limit=0.45)
    source.light.effective_radius = af.UniformPrior(lower_limit=0.45, upper_limit=0.55)
    source.light.sersic_index = af.UniformPrior(lower_limit=0.9, upper_limit=1.1)

    class GridPhase(af.as_grid_search(phase_class=al.PhaseImaging, parallel=True)):
        @property
        def grid_priors(self):
            # The grid search iterates over the subhalo centre.
            return [
                self.model.galaxies.subhalo.mass.centre_0,
                self.model.galaxies.subhalo.mass.centre_1,
            ]

    # Subhalo: truncated NFW whose mass spans orders of magnitude -> log-uniform prior.
    subhalo = al.GalaxyModel(redshift=0.5, mass=al.mp.SphericalTruncatedNFWMCRLudlow)
    subhalo.mass.mass_at_200 = af.LogUniformPrior(lower_limit=1.0e6, upper_limit=1.0e11)
    subhalo.mass.centre_0 = af.UniformPrior(lower_limit=-2.5, upper_limit=2.5)
    subhalo.mass.centre_1 = af.UniformPrior(lower_limit=-2.5, upper_limit=2.5)

    phase1 = GridPhase(
        phase_name="phase_1",
        folders=folders,
        galaxies=dict(lens=lens, subhalo=subhalo, source=source),
        search=search,
        settings=al.SettingsPhaseImaging(),
        number_of_steps=2,
    )

    return al.PipelineDataset(name, phase1)
def test_log_uniform_prior(result):
    """
    Grid-search limits/centres for a default `LogUniformPrior` grid prior.

    NOTE(review): each assertion compares two lists, presumably one per grid
    step of the `result` fixture -- confirm. The centres are the geometric
    means of the corresponding limits, as expected for a log-uniform mapping.
    """
    result.grid_priors = [af.LogUniformPrior()]
    assert result.physical_lower_limits_lists == [[1e-06], [
        pytest.approx(0.001, rel=0.01)
    ]]
    assert result.physical_centres_lists == [[
        pytest.approx(3.1622776601683795e-05, rel=0.01)
    ], [pytest.approx(0.03162277660168379, rel=0.01)]]
    assert result.physical_upper_limits_lists == [[
        pytest.approx(0.001, rel=0.01)
    ], [pytest.approx(1.0, rel=0.01)]]
def test__autocorrelation_times(self):
    """Current and previous auto-correlation times match the emcee output."""
    emcee = af.Emcee()
    emcee.paths = af.DirectoryPaths(path_prefix=path.join("non_linear", "emcee"))
    emcee.paths._identifier = "tag"

    model = af.ModelMapper(mock_class=af.m.MockClassx4)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=10.0)

    samples = emcee.samples_from(model=model)

    expected_previous = [31.1079, 36.0910, 72.44768, 65.86194]
    expected_current = [31.98507, 36.51001, 73.47629, 67.67495]

    assert samples.auto_correlations.previous_times == pytest.approx(
        expected_previous, 1.0e-4
    )
    assert samples.auto_correlations.times == pytest.approx(expected_current, 1.0e-4)
def test_log10(lower_limit, upper_limit, unit):
    """value_for interpolates linearly in log10 space between the prior limits."""
    prior = af.LogUniformPrior(lower_limit=lower_limit, upper_limit=upper_limit)

    log_lower = np.log10(lower_limit)
    log_upper = np.log10(upper_limit)
    expected = 10.0 ** (log_lower + unit * (log_upper - log_lower))

    assert prior.value_for(unit) == pytest.approx(expected, abs=0.001)
def test__samples_from_model(self, multi_nest_samples_path,
                             multi_nest_resume_path,
                             multi_nest_summary_path):
    """Write mock MultiNest output files to disk and check samples_from parses them."""
    multi_nest = af.MultiNest()
    multi_nest.paths = af.DirectoryPaths(
        path_prefix=path.join("non_linear", "multinest"))
    # Create the weighted-samples, resume and summary files samples_from reads.
    create_weighted_samples_4_parameters(
        file_path=multi_nest.paths.samples_path)
    create_resume(file_path=multi_nest.paths.samples_path)
    create_summary_4_parameters(file_path=multi_nest.paths.samples_path)
    model = af.ModelMapper(mock_class=mock.MockClassx4)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=10.0)
    samples = multi_nest.samples_from(model=model)
    assert samples.parameter_lists == [
        [1.1, 2.1, 3.1, 4.1],
        [0.9, 1.9, 2.9, 3.9],
        [1.0, 2.0, 3.0, 4.0],
        [1.0, 2.0, 3.0, 4.0],
        [1.0, 2.0, 3.0, 4.0],
        [1.0, 2.0, 3.0, 4.0],
        [1.0, 2.0, 3.0, 4.0],
        [1.0, 2.0, 3.0, 4.0],
        [1.0, 2.0, 3.0, 4.0],
        [1.0, 2.0, 3.0, 4.0],
    ]
    # All mock samples share one likelihood value.
    value = -0.5 * 9999999.9
    assert samples.log_likelihood_list == 10 * [value]
    assert samples.log_prior_list == pytest.approx([
        0.243902, 0.256410, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25
    ], 1.0e-4)
    assert samples.weight_list == [
        0.02, 0.02, 0.01, 0.05, 0.1, 0.1, 0.1, 0.1, 0.2, 0.3
    ]
    # Values written by the mock resume/summary files.
    assert samples.total_samples == 12345
    assert samples.log_evidence == 0.02
    assert samples.number_live_points == 50
def customize_priors(self, results):
    """Customize subhalo priors and pass source-light priors from phase 1."""
    ### Lens Mass, PL -> PL, Shear -> Shear ###

    # self.galaxies.lens = results.from_phase('phase_1').\
    #     constant.galaxies.lens

    ### Lens Subhalo, Adjust priors to physical masses (10^6 - 10^10) and concentrations (6-24)
    # NOTE(review): the limits below are 10.0e6 - 10.0e9 (i.e. 10^7 - 10^10),
    # which does not match the 10^6 - 10^10 range stated above -- confirm
    # which range is intended.

    self.galaxies.subhalo.mass.mass_at_200 = af.LogUniformPrior(
        lower_limit=10.0e6, upper_limit=10.0e9)
    self.galaxies.subhalo.mass.centre_0 = af.UniformPrior(
        lower_limit=-2.0, upper_limit=2.0)
    self.galaxies.subhalo.mass.centre_1 = af.UniformPrior(
        lower_limit=-2.0, upper_limit=2.0)

    ### Source Light, Sersic -> Sersic ###

    # Pass the phase-1 source priors, with absolute (a=...) or relative (r=...)
    # error widths.
    self.galaxies.source.light.centre = (
        results.from_phase("phase_1").variable_absolute(
            a=0.05).galaxies.source.light.centre)
    self.galaxies.source.light.intensity = (
        results.from_phase("phase_1").variable_relative(
            r=0.5).galaxies.source.light.intensity)
    self.galaxies.source.light.effective_radius = (
        results.from_phase("phase_1").variable_relative(
            r=0.5).galaxies.source.light.effective_radius)
    self.galaxies.source.light.sersic_index = (
        results.from_phase("phase_1").variable_relative(
            r=0.5).galaxies.source.light.sersic_index)
    self.galaxies.source.light.axis_ratio = (
        results.from_phase("phase_1").variable_absolute(
            a=0.1).galaxies.source.light.axis_ratio)
    self.galaxies.source.light.phi = (
        results.from_phase("phase_1").variable_absolute(
            a=20.0).galaxies.source.light.phi)
def test__vector_at_sigma__uses_output_files(self):
    """vector_at_sigma returns the expected parameter intervals at 3- and 1-sigma."""
    emcee = af.Emcee()
    emcee.paths = af.DirectoryPaths(path_prefix=path.join("non_linear", "emcee"))
    emcee.paths._identifier = "tag"

    model = af.ModelMapper(mock_class=af.m.MockClassx4)
    model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=10.0)

    samples = emcee.samples_from(model=model)

    # 3-sigma interval of the first parameter.
    three_sigma = samples.vector_at_sigma(sigma=3.0)
    assert three_sigma[0][0:2] == pytest.approx((-0.003197, 0.019923), 1e-2)

    # 1-sigma interval is narrower.
    one_sigma = samples.vector_at_sigma(sigma=1.0)
    assert one_sigma[0][0:2] == pytest.approx((0.0042278, 0.01087681), 1e-2)
def hyper_galaxy_via_galaxy_model_from(self, galaxy_model, galaxy_instance,
                                       noise_factor_is_model=False):
    """
    Build a hyper-galaxy model mirroring a fitted galaxy instance.

    Parameters
    ----------
    galaxy_model
        The galaxy model whose `hyper_galaxy` attribute determines whether a
        hyper galaxy is used at all.
    galaxy_instance
        The fitted galaxy instance whose hyper-galaxy values are copied.
    noise_factor_is_model
        If True, the noise factor is left free as a log-uniform prior bounded
        above by twice the previously inferred value; otherwise it is fixed.

    Returns
    -------
    The hyper-galaxy `af.Model`, or None if the galaxy model has no hyper galaxy.
    """
    # Bail out before building the model (the original constructed af.Model
    # first, wasting it on the early-return path).
    if galaxy_model.hyper_galaxy is None:
        return None

    hyper_galaxy = af.Model(ag.HyperGalaxy)

    if noise_factor_is_model:
        # Keep the noise factor free, bounded by the previously inferred value.
        hyper_galaxy.noise_factor = af.LogUniformPrior(
            lower_limit=1e-4,
            upper_limit=2.0 * galaxy_instance.hyper_galaxy.noise_factor,
        )
    else:
        hyper_galaxy.noise_factor = galaxy_instance.hyper_galaxy.noise_factor

    hyper_galaxy.contribution_factor = (
        galaxy_instance.hyper_galaxy.contribution_factor)
    hyper_galaxy.noise_power = galaxy_instance.hyper_galaxy.noise_power

    return hyper_galaxy
    color="k",
    ecolor="k",
    elinewidth=1,
    capsize=2,
)
plt.show()
plt.close()

"""
__Model + Analysis__

We create the model and analysis, which in this example is a single `Gaussian` and therefore has dimensionality N=3.
"""
model = af.Model(m.Gaussian)
model.centre = af.UniformPrior(lower_limit=0.0, upper_limit=100.0)
# Intensity spans several orders of magnitude, hence the log-uniform prior.
model.intensity = af.LogUniformPrior(lower_limit=1e-2, upper_limit=1e2)
model.sigma = af.UniformPrior(lower_limit=0.0, upper_limit=30.0)

analysis = a.Analysis(data=data, noise_map=noise_map)

"""
__Search__

We now create and run the `MultiNest` object which acts as our non-linear search.

We manually specify all of the MultiNest settings, descriptions of which are provided at the following webpage:

- https://github.com/JohannesBuchner/MultiNest
- https://github.com/JohannesBuchner/PyMultiNest
- http://johannesbuchner.github.io/PyMultiNest/index.html#
"""
multi_nest = af.MultiNest(
    visibilities=np.add(visibilities, noise_map),
    noise_map=noise_map,
    z_step_kms=z_step_kms)

# Lens: power-law mass profile; the slope is fixed to 2.0 (not fit).
lens_model = af.PriorModel(mass_profiles.EllipticalPowerLaw)
lens_model.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.25)
lens_model.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.25)
lens_model.einstein_radius = af.UniformPrior(lower_limit=0.85, upper_limit=1.25)
lens_model.slope = 2.0

# Source: kinematical profile; intensity spans orders of magnitude -> log-uniform.
src_model = af.PriorModel(profiles.Kinematical)
src_model.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.25)
src_model.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.25)
src_model.z_centre = af.GaussianPrior(mean=16.0, sigma=2.0)
src_model.intensity = af.LogUniformPrior(lower_limit=10**-2.0, upper_limit=10**+2.0)
src_model.maximum_velocity = af.UniformPrior(lower_limit=25.0, upper_limit=400.0)
src_model.velocity_dispersion = af.UniformPrior(lower_limit=0.0, upper_limit=100.0)

# Name the output folder after this script's filename (without ".py").
phase_folders = [
    string_utils.remove_substring_from_end_of_string(
        string=os.path.basename(__file__), substring=".py")
]

phase_1 = phase.Phase(
    phase_name="phase_1__version_{}".format(autolens_version),
    phase_folders=phase_folders,
    profiles=af.CollectionPriorModel(
        lens=lens_model,
# ncols=8, # cube_contours=lensed_cube, # ) # exit() lens_model = af.PriorModel(mass_profiles.EllipticalPowerLaw) lens_model.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.25) lens_model.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.25) lens_model.einstein_radius = af.UniformPrior(lower_limit=0.85, upper_limit=1.25) lens_model.slope = 2.0 source_model_1 = af.PriorModel(profiles.EllipticalSersic) source_model_1.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.25) source_model_1.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.25) source_model_1.intensity = af.LogUniformPrior(lower_limit=5.0 * 10**-6.0, upper_limit=5.0 * 10**-4.0) source_model_2 = af.PriorModel(profiles.Kinematical) source_model_2.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.25) source_model_2.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.25) source_model_2.z_centre = af.GaussianPrior(mean=16.0, sigma=2.0) source_model_2.intensity = af.LogUniformPrior(lower_limit=10**-2.0, upper_limit=10**+2.0) source_model_2.maximum_velocity = af.UniformPrior(lower_limit=25.0, upper_limit=400.0) source_model_2.velocity_dispersion = af.UniformPrior(lower_limit=0.0, upper_limit=100.0) phase_folders = [ string_utils.remove_substring_from_end_of_string( string=os.path.basename(__file__), substring=".py")
def test__identifier_description__after_take_attributes():
    """take_attributes(source=model) should leave the identifier description unchanged."""
    model = af.CollectionPriorModel(gaussian=af.PriorModel(
        af.Gaussian,
        centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0),
        normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01),
        sigma=af.GaussianPrior(
            mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0),
    ))
    model.take_attributes(source=model)

    description = Identifier([model]).description.splitlines()

    # THIS TEST FAILS DUE TO THE BUG DESCRIBED IN A GITHUB ISSUE.
    expected = [
        "CollectionPriorModel",
        "item_number",
        "0",
        "gaussian",
        "PriorModel",
        "cls",
        "autofit.example.model.Gaussian",
        "centre",
        "UniformPrior",
        "lower_limit",
        "0.0",
        "upper_limit",
        "1.0",
        "normalization",
        "LogUniformPrior",
        "lower_limit",
        "0.001",
        "upper_limit",
        "0.01",
        "sigma",
        "GaussianPrior",
        "lower_limit",
        "-1.0",
        "upper_limit",
        "1.0",
        "mean",
        "0.5",
        "sigma",
        "2.0",
    ]
    # Compare only the prefix the original line-by-line assertions covered.
    assert description[: len(expected)] == expected
def test__identifier_description__after_model_and_instance():
    """
    After passing centre via a result's model and normalization via its instance,
    the description shows a GaussianPrior for centre and a fixed float for
    normalization.
    """
    model = af.CollectionPriorModel(gaussian=af.PriorModel(
        af.Gaussian,
        centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0),
        normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01),
        sigma=af.GaussianPrior(
            mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0),
    ))

    max_log_likelihood_instance = model.instance_from_prior_medians()
    samples = af.m.MockSamples(
        max_log_likelihood_instance=max_log_likelihood_instance,
        gaussian_tuples=[(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)])
    search = af.m.MockSearch(prior_passer=af.PriorPasser(
        sigma=1.0, use_errors=True, use_widths=False))
    result = af.Result(samples=samples, model=model, search=search)

    # centre becomes a GaussianPrior via the result's model; normalization
    # becomes a fixed float via the result's instance.
    model.gaussian.centre = result.model.gaussian.centre
    model.gaussian.normalization = result.instance.gaussian.normalization

    description = Identifier([model]).description.splitlines()

    # THIS TEST FAILS DUE TO THE BUG DESCRIBED IN A GITHUB ISSUE.
    expected = [
        "CollectionPriorModel",
        "item_number",
        "0",
        "gaussian",
        "PriorModel",
        "cls",
        "autofit.example.model.Gaussian",
        "centre",
        "GaussianPrior",
        "lower_limit",
        "0.0",
        "upper_limit",
        "1.0",
        "mean",
        "1.0",
        "sigma",
        "2.0",
        "normalization",
        "0.00316228",
        "sigma",
        "GaussianPrior",
        "lower_limit",
        "-1.0",
        "upper_limit",
        "1.0",
        "mean",
        "0.5",
        "sigma",
        "2.0",
    ]
    # Compare only the prefix the original line-by-line assertions covered.
    assert description[: len(expected)] == expected
def test__identifier_description():
    """The identifier description lists the collection's priors line by line."""
    model = af.CollectionPriorModel(gaussian=af.PriorModel(
        af.Gaussian,
        centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0),
        normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01),
        sigma=af.GaussianPrior(
            mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0),
    ))

    description = Identifier([model]).description.splitlines()

    expected = [
        "CollectionPriorModel",
        "item_number",
        "0",
        "gaussian",
        "PriorModel",
        "cls",
        "autofit.example.model.Gaussian",
        "centre",
        "UniformPrior",
        "lower_limit",
        "0.0",
        "upper_limit",
        "1.0",
        "normalization",
        "LogUniformPrior",
        "lower_limit",
        "0.001",
        "upper_limit",
        "0.01",
        "sigma",
        "GaussianPrior",
        "lower_limit",
        "-1.0",
        "upper_limit",
        "1.0",
        "mean",
        "0.5",
        "sigma",
        "2.0",
    ]
    # Compare only the prefix the original line-by-line assertions covered.
    assert description[: len(expected)] == expected
def make_pipeline(
    setup,
    phase_folders,
    real_space_mask,
    lens_redshift,
    source_redshift,
    instance,
    priors=None,
    pipeline_name="pipeline_source__inversion",
    transformer_class=al.TransformerNUFFT,
    auto_positions_factor=None,
    positions_threshold=None,
    sub_size=1,
    inversion_uses_border=True,
    inversion_pixel_limit=None,
    evidence_tolerance=10.0,
):
    """
    Build a one-phase interferometer pipeline fitting a fixed (instance) lens
    with an inversion-based source.

    NOTE(review): `priors`, `lens_redshift` and `evidence_tolerance` are
    accepted but never used in the body below (the optimizer's
    evidence_tolerance is hard-coded to 0.1) -- confirm whether they should be
    wired in.
    """
    phase_folders.append(pipeline_name)
    # Append setup-dependent tag folders to the output path.
    for type in ["general", "source"]:
        if hasattr(setup, type):
            if type == "general":
                phase_folders.append(setup.general.source_tag)
            if type == "source":
                setup.set_source_type(source_type=setup.source.inversion_tag)
                phase_folders.append(setup.source.tag)
    # The lens must be supplied as a fixed instance.
    if "lens" in instance.keys():
        lens = instance["lens"]
    else:
        raise ValueError("...")
    source = al.GalaxyModel(
        redshift=source_redshift,
        pixelization=al.pix.VoronoiMagnification,
        regularization=al.reg.Constant,
    )
    source.pixelization.shape.shape_0 = af.UniformPrior(lower_limit=5, upper_limit=100)
    source.pixelization.shape.shape_1 = af.UniformPrior(lower_limit=5, upper_limit=100)
    # The regularization coefficient spans many orders of magnitude, hence the
    # broad log-uniform prior.
    source.regularization.coefficient = af.LogUniformPrior(
        lower_limit=10**-5.0, upper_limit=10**+5.0)
    phase1 = al.PhaseInterferometer(
        phase_name="phase_1__lens_instance__source_inversion",
        phase_folders=phase_folders,
        real_space_mask=real_space_mask,
        galaxies=dict(
            lens=lens,
            source=source,
        ),
        transformer_class=transformer_class,
        positions_threshold=positions_threshold,
        auto_positions_factor=auto_positions_factor,
        sub_size=sub_size,
        inversion_uses_border=inversion_uses_border,
        inversion_pixel_limit=inversion_pixel_limit,
        non_linear_class=af.MultiNest,
    )
    # MultiNest settings.
    phase1.optimizer.const_efficiency_mode = True
    phase1.optimizer.n_live_points = 20
    phase1.optimizer.sampling_efficiency = 0.8
    phase1.optimizer.evidence_tolerance = 0.1
    return al.PipelineDataset(pipeline_name, phase1)
# Two source-plane galaxies, each reconstructed on a Voronoi magnification-based
# pixelization with constant regularization.
source_1 = al.GalaxyModel(
    redshift=source_redshift,
    pixelization=al.pix.VoronoiMagnification,
    regularization=al.reg.Constant,
)
source_2 = al.GalaxyModel(
    redshift=source_redshift,
    pixelization=al.pix.VoronoiMagnification,
    regularization=al.reg.Constant,
)
source_1.pixelization.shape.shape_0 = af.UniformPrior(lower_limit=5,
                                                      upper_limit=50)
source_1.pixelization.shape.shape_1 = af.UniformPrior(lower_limit=5,
                                                      upper_limit=50)
# The regularization coefficient spans many orders of magnitude, hence the
# broad log-uniform prior.
source_1.regularization.coefficient = af.LogUniformPrior(
    lower_limit=10**-6.0, upper_limit=10**8.0)
# The second source shares the first source's pixelization shape priors.
source_2.pixelization.shape = source_1.pixelization.shape
source_2.regularization.coefficient = af.LogUniformPrior(
    lower_limit=10**-6.0, upper_limit=10**8.0)

# Alternative fixed values (disabled):
# source_1.pixelization.shape = (15, 15)
# source_1.regularization.coefficient = 5000.0
# source_2.pixelization.shape = (15, 15)
# source_2.regularization.coefficient = 1000000.0

# Fix the lens mass profile parameters (they are not fit for).
lens.mass.centre_0 = 0.0
lens.mass.centre_1 = 0.0
lens.mass.axis_ratio = 0.75
lens.mass.phi = 45.0
lens.mass.einstein_radius = 1.0
def make_pipeline(phase_folders=None):
    """
    Build a three-phase pipeline: parametric source fit, source-inversion
    initialization, then lens mass refinement using the inversion.

    NOTE(review): the default `phase_folders=None` would crash at the
    `.append` call below -- callers must pass a list; confirm whether the
    default should be removed or handled.
    """
    ### SETUP PIPELINE AND PHASE NAMES, TAGS AND PATHS ###

    # We set up the pipeline name using the tagging module. In this case, the pipeline name is not given a tag and
    # will be the string specified below. However, it is good practice to use the 'tag.' function below, in case
    # a pipeline does use customized tag names.

    pipeline_name = "pipeline__inversion"

    # This function uses the phase folders and pipeline name to set up the output directory structure,
    # e.g. 'autolens_workspace/output/phase_folder_1/phase_folder_2/pipeline_name/phase_name/'

    phase_folders.append(pipeline_name)

    # This is the same phase 1 as the complex source pipeline, which we saw gave a good fit to the overall
    # structure of the lensed source and provided an accurate lens mass model.

    phase1 = al.PhaseImaging(
        phase_name="phase_1__lens_sie__source_sersic",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, mass=al.mp.EllipticalIsothermal),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        optimizer_class=af.MultiNest,
    )

    phase1.optimizer.sampling_efficiency = 0.3
    phase1.optimizer.const_efficiency_mode = True

    # Now, in phase 2, lets use the lens mass model to fit the source with an inversion.

    source = al.GalaxyModel(
        redshift=1.0,
        pixelization=al.pix.VoronoiMagnification,
        regularization=al.reg.Constant,
    )

    # We can customize the inversion's priors like we do our light and mass profiles.

    source.pixelization.shape_0 = af.UniformPrior(lower_limit=20.0, upper_limit=40.0)
    source.pixelization.shape_1 = af.UniformPrior(lower_limit=20.0, upper_limit=40.0)

    # The expected value of the regularization coefficient depends on the details of the dataset reduction and
    # source galaxy. A broad log-uniform prior is thus an appropriate way to sample the large range of
    # possible values.

    source.regularization.coefficient = af.LogUniformPrior(lower_limit=1.0e-6,
                                                           upper_limit=10000.0)

    phase2 = al.PhaseImaging(
        phase_name="phase_2__source_inversion_initialize",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5,
                                mass=phase1.result.model.galaxies.lens.mass),
            source=source,
        ),
        optimizer_class=af.MultiNest,
    )

    phase2.optimizer.sampling_efficiency = 0.3
    phase2.optimizer.const_efficiency_mode = True

    # We now 'extend' phase 2 with an additional 'inversion phase' which uses the best-fit mass model of phase 1 above
    # to refine the inversion used, by fitting only the pixelization & regularization parameters.
    # The inversion phase results are accessible as attributes of the phase results and used in phase 3 below.

    phase2 = phase2.extend_with_inversion_phase()

    # Now, in phase 3, lets use the refined source inversion to fit the lens mass model again.

    phase3 = al.PhaseImaging(
        phase_name="phase_3__lens_sie__source_inversion",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5,
                                mass=phase1.result.model.galaxies.lens.mass),
            source=al.GalaxyModel(
                redshift=1.0,
                pixelization=phase2.inversion.instance.galaxies.source.
                pixelization,
                regularization=phase2.inversion.instance.galaxies.source.
                regularization,
            ),
        ),
        optimizer_class=af.MultiNest,
    )

    phase3.optimizer.sampling_efficiency = 0.3
    phase3.optimizer.const_efficiency_mode = True

    return al.PipelineDataset(pipeline_name, phase1, phase2, phase3)