def test_dypolychord(self):
     fittingSequence = FittingSequence(self.kwargs_data_joint,
                                       self.kwargs_model,
                                       self.kwargs_constraints,
                                       self.kwargs_likelihood,
                                       self.kwargs_params)
     fitting_list = []
     kwargs_dypolychord = {
         'sampler_type': 'DYPOLYCHORD',
         'kwargs_run': {
             'ninit': 8,
             'nlive_const': 10,
             #'seed_increment': 1,
             'resume_dyn_run': False,
             #'init_step': 10,
         },
         'polychord_settings': {
             'seed': 1,
             #'num_repeats': 20
         },
         'dypolychord_dynamic_goal':
         0.8,  # 1 for posterior-only, 0 for evidence-only
         'remove_output_dir': True,
     }
     fitting_list.append(['nested_sampling', kwargs_dypolychord])
     chain_list = fittingSequence.fit_sequence(fitting_list)
Example 2
    def initialize_sampler(self, walker_ratio, chains_save_path):
        """
		Initialize the sampler to be used by run_samples.


		Parameters:
			walker_ratio (int): The number of walkers per free parameter.
				Must be at least 2.
			save_path (str): An h5 path specifying where to save the
				sampler chains. If a sampler chain is already present in the
				path it will be loaded.
		"""
        if self.image_selected is False:
            raise RuntimeError('Select an image before starting your sampler')

        # Set up the fitting sequence and fitting kwargs from lenstronomy
        self.walker_ratio = walker_ratio
        self.chains_save_path = chains_save_path
        ls_kwargs_data_joint = {
            'multi_band_list': self.ls_multi_band_list,
            'multi_band_type': 'multi-linear'
        }
        ls_kwargs_constraints = {}
        self.fitting_seq = FittingSequence(ls_kwargs_data_joint,
                                           self.ls_kwargs_model,
                                           ls_kwargs_constraints,
                                           self.ls_kwargs_likelihood,
                                           self.ls_kwargs_params)

        self.sampler_init = True
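    # Hedged usage sketch (not part of the original class): once the fitting
    # sequence is initialized, it could be driven with the emcee-style MCMC
    # kwargs used in the other snippets on this page. The values below are
    # illustrative only, and `run_samples` itself is not shown here.
    # chain_list = self.fitting_seq.fit_sequence([
    #     ['MCMC', {'n_burn': 100, 'n_run': 100,
    #               'walkerRatio': self.walker_ratio, 'sigma_scale': 0.1}]])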
Example 3
 def build_fitting_seq(self):
     from lenstronomy.Workflow.fitting_sequence import FittingSequence
     self.fitting_seq = FittingSequence(self.kwargs_data_joint,
                                        self.kwargs_model,
                                        self.kwargs_constraints,
                                        self.kwargs_likelihood,
                                        self.kwargs_params)
    def test_multinest(self):
        # Nested sampler tests
        # further decrease the parameter space for nested samplers to run faster

        fittingSequence = FittingSequence(self.kwargs_data_joint,
                                          self.kwargs_model,
                                          self.kwargs_constraints,
                                          self.kwargs_likelihood,
                                          self.kwargs_params)
        fitting_list = []
        kwargs_update = {
            'ps_add_fixed': [[0, ['ra_source', 'dec_source'], [0, 0]]],
            'lens_light_add_fixed': [[
                0, ['n_sersic', 'R_sersic', 'center_x', 'center_y'],
                [4, .1, 0, 0]
            ]],
            'source_add_fixed': [[
                0, ['R_sersic', 'e1', 'e2', 'center_x', 'center_y'],
                [.6, .1, .1, 0, 0]
            ]],
            'lens_add_fixed': [[
                0, ['gamma', 'theta_E', 'e1', 'e2', 'center_x', 'center_y'],
                [1.8, 1., .1, .1, 0, 0]
            ], [1, ['gamma1', 'gamma2'], [0.01, 0.01]]],
            'change_source_lower_limit': [[0, ['n_sersic'], [2.9]]],
            'change_source_upper_limit': [[0, ['n_sersic'], [3.1]]]
        }
        fitting_list.append(['update_settings', kwargs_update])
        kwargs_multinest = {
            'sampler_type': 'MULTINEST',
            'kwargs_run': {
                'n_live_points': 10,
                'evidence_tolerance': 0.5,
                'sampling_efficiency':
                0.8,  # 1 for posterior-only, 0 for evidence-only
                'importance_nested_sampling': False,
                'multimodal': True,
                'const_efficiency_mode':
                False,  # reduce sampling_efficiency to 5% when True
            },
            'remove_output_dir': True,
        }
        fitting_list.append(['nested_sampling', kwargs_multinest])

        chain_list2 = fittingSequence.fit_sequence(fitting_list)
        kwargs_fixed = fittingSequence._updateManager.fixed_kwargs
        npt.assert_almost_equal(kwargs_fixed[0][1]['gamma1'], 0.01, decimal=2)
        assert fittingSequence._updateManager._lower_kwargs[1][0][
            'n_sersic'] == 2.9
        assert fittingSequence._updateManager._upper_kwargs[1][0][
            'n_sersic'] == 3.1

        kwargs_test = {'kwargs_lens': 1}
        fittingSequence.update_state(kwargs_test)
        kwargs_out = fittingSequence.best_fit(bijective=True)
        assert kwargs_out['kwargs_lens'] == 1
    def run_mcmc(self, mcmc_numerics):
        """Sample from the joint likelihood

        """
        fitting_seq = FittingSequence(self.kwargs_data_joint,
                                      self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood,
                                      self.kwargs_params)
        fitting_kwargs_list = [['MCMC', mcmc_numerics]]
        #with script_utils.HiddenPrints():
        chain_list_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)
        kwargs_result_mcmc = fitting_seq.best_fit()
        return chain_list_mcmc, kwargs_result_mcmc
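    # Hedged sketch (illustrative values): `mcmc_numerics` follows the
    # emcee-style MCMC settings used in the other snippets on this page, e.g.
    # mcmc_numerics = {'n_burn': 100, 'n_run': 200, 'walkerRatio': 10, 'sigma_scale': 0.1}
    # chain_list_mcmc, kwargs_result_mcmc would then be returned by run_mcmc(mcmc_numerics).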
    def test_fitting_sequence(self):
        kwargs_init = [
            self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light,
            self.kwargs_ps
        ]
        lens_sigma = [{
            'theta_E_sigma': 0.1,
            'gamma_sigma': 0.1,
            'ellipse_sigma': 0.1,
            'center_x_sigma': 0.1,
            'center_y_sigma': 0.1
        }, {
            'shear_sigma': 0.1
        }]
        source_sigma = [{
            'R_sersic_sigma': 0.05,
            'n_sersic_sigma': 0.5,
            'center_x_sigma': 0.1,
            'center_y_sigma': 0.1,
            'ellipse_sigma': 0.1
        }]
        lens_light_sigma = [{
            'R_sersic_sigma': 0.05,
            'n_sersic_sigma': 0.5,
            'center_x_sigma': 0.1,
            'center_y_sigma': 0.1
        }]
        ps_sigma = [{'pos_sigma': 1, 'point_amp_sigma': 1}]
        kwargs_sigma = [lens_sigma, source_sigma, lens_light_sigma, ps_sigma]
        kwargs_fixed = [[{}, {}], [{}], [{}], [{}]]
        kwargs_params = [
            kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_init, kwargs_init
        ]
        image_band = [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]
        multi_band_list = [image_band]
        fittingSequence = FittingSequence(multi_band_list, self.kwargs_model,
                                          self.kwargs_constraints,
                                          self.kwargs_likelihood,
                                          kwargs_params)

        lens_temp, source_temp, lens_light_temp, else_temp, chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fittingSequence.fit_sequence(
            fitting_kwargs_list=[])
        npt.assert_almost_equal(lens_temp[0]['theta_E'],
                                self.kwargs_lens[0]['theta_E'],
                                decimal=2)

        n_p = 2
        n_i = 2
        """
Example 7
    def test_get_galaxy_galaxy_recipe(self):
        """
        Test `get_galaxy_galaxy_recipe` method.
        :return:
        :rtype:
        """
        image = np.random.normal(size=(120, 120))
        kwargs_data_joint = {
            'multi_band_list': [[{
                'image_data':
                image,
                'background_rms':
                0.01,
                'exposure_time':
                np.ones_like(image),
                'ra_at_xy_0':
                0.,
                'dec_at_xy_0':
                0.,
                'transform_pix2angle':
                np.array([[-0.01, 0], [0, 0.01]])
            }, {}, {}]],
            'multi_band_type':
            'multi-linear'
        }
        fitting_kwargs_list = self.recipe.get_galaxy_galaxy_recipe(
            kwargs_data_joint)
        assert isinstance(fitting_kwargs_list, list)

        # test the recipe by running it fully
        config = deepcopy(self.config)
        config.settings['model']['source_light'] = ['SHAPELETS']

        recipe = Recipe(config)

        fitting_sequence = FittingSequence(
            kwargs_data_joint,
            config.get_kwargs_model(),
            config.get_kwargs_constraints(),
            config.get_kwargs_likelihood(),
            config.get_kwargs_params(),
        )

        fitting_kwargs_list = recipe.get_recipe(
            kwargs_data_joint=kwargs_data_joint, recipe_name='galaxy-galaxy')

        fitting_sequence.fit_sequence(fitting_kwargs_list)
    def test_fitting_sequence(self):
        #kwargs_init = [self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps]
        lens_sigma = [{'theta_E': 0.1, 'gamma': 0.1, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1}, {'e1': 0.1, 'e2': 0.1}]
        lens_lower = [{'theta_E': 0., 'gamma': 1.5, 'center_x': -2, 'center_y': -2, 'e1': -0.4, 'e2': -0.4}, {'e1': -0.3, 'e2': -0.3}]
        lens_upper = [{'theta_E': 10., 'gamma': 2.5, 'center_x': 2, 'center_y': 2, 'e1': 0.4, 'e2': 0.4}, {'e1': 0.3, 'e2': 0.3}]
        source_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1, 'center_y': 0.1, 'e1': 0.1, 'e2': 0.1}]
        source_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2, 'center_y': -2, 'e1': -0.4, 'e2': -0.4}]
        source_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2, 'center_y': 2, 'e1': 0.4, 'e2': 0.4}]

        lens_light_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1, 'center_y': 0.1}]
        lens_light_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2, 'center_y': -2}]
        lens_light_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2, 'center_y': 2}]
        ps_sigma = [{'ra_source': 1, 'dec_source': 1, 'point_amp': 1}]

        lens_param = self.kwargs_lens, lens_sigma, [{}, {}], lens_lower, lens_upper
        source_param = self.kwargs_source, source_sigma, [{}], source_lower, source_upper
        lens_light_param = self.kwargs_lens_light, lens_light_sigma, [{}], lens_light_lower, lens_light_upper
        ps_param = self.kwargs_ps, ps_sigma, [{}], self.kwargs_ps, self.kwargs_ps

        kwargs_params = {'lens_model': lens_param,
                         'source_model': source_param,
                         'lens_light_model': lens_light_param,
                         'point_source_model': ps_param,
                         #'cosmography': cosmo_param
        }
        #kwargs_params = [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_init, kwargs_init]
        image_band = [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]
        multi_band_list = [image_band]
        fittingSequence = FittingSequence(multi_band_list, self.kwargs_model, self.kwargs_constraints, self.kwargs_likelihood, kwargs_params)

        lens_temp, source_temp, lens_light_temp, else_temp, cosmo_temp, chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fittingSequence.fit_sequence(fitting_kwargs_list=[])
        npt.assert_almost_equal(lens_temp[0]['theta_E'], self.kwargs_lens[0]['theta_E'], decimal=2)

        n_p = 2
        n_i = 2
        
        fitting_kwargs_list = [
            {'fitting_routine': 'PSO', 'sigma_scale': 1, 'n_particles': n_p, 'n_iterations': n_i},
            {'fitting_routine': 'MCMC', 'sigma_scale': 0.1, 'n_burn': 1, 'n_run': 1, 'walkerRatio': 2},
            {'fitting_routine': 'align_images', 'lower_limit_shift': -0.1, 'upper_limit_shift': 0.1, 'n_particles': 2, 'n_iterations': 2},
            {'fitting_routine': 'psf_iteration', 'psf_iter_num': 2, 'psf_iter_factor': 0.5, 'kwargs_psf_iter': {'stacking_option': 'mean'}}
        ]
        lens_temp, source_temp, lens_light_temp, else_temp, cosmo_temp, chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fittingSequence.fit_sequence(fitting_kwargs_list=fitting_kwargs_list)
        npt.assert_almost_equal(lens_temp[0]['theta_E'], self.kwargs_lens[0]['theta_E'], decimal=1)
    def test_nautilus(self):
        kwargs_params = copy.deepcopy(self.kwargs_params)
        fittingSequence = FittingSequence(self.kwargs_data_joint,
                                          self.kwargs_model,
                                          self.kwargs_constraints,
                                          self.kwargs_likelihood,
                                          kwargs_params)

        fitting_list = []
        kwargs_nautilus = {
            'prior_type': 'uniform',
            'thread_count': 1,
            'verbose': True,
            'one_step': True,
            'n_live': 2,
            'random_state': 42
        }

        fitting_list.append(['Nautilus', kwargs_nautilus])
        chain_list = fittingSequence.fit_sequence(fitting_list)
    def test_dynesty(self):
        kwargs_params = copy.deepcopy(self.kwargs_params)
        kwargs_params['lens_model'][0][0]['theta_E'] += 0.01
        fittingSequence = FittingSequence(self.kwargs_data_joint,
                                          self.kwargs_model,
                                          self.kwargs_constraints,
                                          self.kwargs_likelihood,
                                          kwargs_params)

        fitting_list = []
        kwargs_dynesty = {
            'sampler_type': 'DYNESTY',
            'kwargs_run': {
                'dlogz_init': 0.01,
                'nlive_init': 6,
                'nlive_batch': 6,
                'maxbatch': 1,
            },
        }

        fitting_list.append(['nested_sampling', kwargs_dynesty])
        chain_list = fittingSequence.fit_sequence(fitting_list)
Example 11
 def __init__(self, kwargs_data_joint, kwargs_model,lens_params,source_params,
              lenslight_params=None, kwargs_constraints=None, kwargs_likelihood=None):
     """
     class to  manage cluster source reconstruction.
     This class inherited the FittingSequence class in Workflow module of lenstronomy.
     :param kwargs_data_joint: keywords arguments of [data, psf, numericals] in lenstronomy convention.
     :param kwargs_model: name of model list
     :param lens_params: lens model keywords arguments [kwargs_lens_init, kwargs_lens_sigma, kwargs_fixed_lens, kwargs_lower_lens, kwargs_upper_lens]
     :param source_params: source model keywords arguments [kwargs_source_init, kwargs_source_sigma, kwargs_fixed_source, kwargs_lower_source, kwargs_upper_source]
     :param kwargs_constraints: contraints on models
     :param kwargs_likelihood: options of calculating likelihood, see more: LikelihoodModule class in Sampling module of lenstronomy.
     """
     self.kwargs_data_joint = kwargs_data_joint
     self.multi_band_list = kwargs_data_joint.get('multi_band_list', [])
     self.kwargs_model = kwargs_model
     kwargs_params = {'lens_model': lens_params, 'source_model': source_params, 'lens_light_model': lenslight_params}
     self.kwargs_params = kwargs_params
     if kwargs_constraints is None:
         kwargs_constraints = {}
     if kwargs_likelihood is None:
         kwargs_likelihood = {'source_marg': False, 'check_positive_flux': True}
     self.fitting_seq_src = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params)
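     # Hedged usage sketch (the wrapper's class name is not shown in this snippet,
     # so `ClusterSourceRecon` below is hypothetical): after construction, the
     # wrapped FittingSequence can be run with the PSO kwargs used elsewhere on
     # this page, e.g.
     # recon = ClusterSourceRecon(kwargs_data_joint, kwargs_model, lens_params, source_params)
     # chain_list = recon.fitting_seq_src.fit_sequence(
     #     [['PSO', {'sigma_scale': 1., 'n_particles': 50, 'n_iterations': 50}]])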
    def test_zeus(self):
        # we build a very basic lens+source model to check that zeus can be run through the fitting sequence
        # we don't reuse the kwargs defined in setup(), since those are modified during the tests; using unique kwargs here is safer

        # data specifics
        sigma_bkg = 0.05  # background noise per pixel
        exp_time = 100  # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
        numPix = 10  # cutout pixel size
        deltaPix = 0.05  # pixel size in arcsec (area per pixel = deltaPix**2)
        fwhm = 0.5  # full width half max of PSF

        # PSF specification

        kwargs_data = sim_util.data_configure_simple(numPix, deltaPix,
                                                     exp_time, sigma_bkg)
        data_class = ImageData(**kwargs_data)
        kwargs_psf_gaussian = {
            'psf_type': 'GAUSSIAN',
            'fwhm': fwhm,
            'pixel_size': deltaPix,
            'truncation': 3
        }
        psf_gaussian = PSF(**kwargs_psf_gaussian)
        kwargs_psf = {
            'psf_type': 'PIXEL',
            'kernel_point_source': psf_gaussian.kernel_point_source,
            'psf_error_map': np.zeros_like(psf_gaussian.kernel_point_source)
        }
        psf_class = PSF(**kwargs_psf)

        # make a lens
        lens_model_list = ['EPL']
        kwargs_epl = {
            'theta_E': 0.6,
            'gamma': 2.6,
            'center_x': 0.0,
            'center_y': 0.0,
            'e1': 0.1,
            'e2': 0.1
        }
        kwargs_lens = [kwargs_epl]
        lens_model_class = LensModel(lens_model_list=lens_model_list)

        # make a source
        source_model_list = ['SERSIC_ELLIPSE']
        kwargs_sersic_ellipse = {
            'amp': 1.,
            'R_sersic': 0.6,
            'n_sersic': 3,
            'center_x': 0.0,
            'center_y': 0.0,
            'e1': 0.1,
            'e2': 0.1
        }
        kwargs_source = [kwargs_sersic_ellipse]
        source_model_class = LightModel(light_model_list=source_model_list)

        kwargs_numerics = {
            'supersampling_factor': 1,
            'supersampling_convolution': False
        }

        imageModel = ImageModel(data_class,
                                psf_class,
                                lens_model_class,
                                source_model_class,
                                kwargs_numerics=kwargs_numerics)
        image_sim = sim_util.simulate_simple(imageModel, kwargs_lens,
                                             kwargs_source)

        data_class.update_data(image_sim)

        kwargs_data['image_data'] = image_sim

        kwargs_model = {
            'lens_model_list': lens_model_list,
            'source_light_model_list': source_model_list
        }

        lens_fixed = [{}]
        lens_sigma = [{
            'theta_E': 0.1,
            'gamma': 0.1,
            'e1': 0.1,
            'e2': 0.1,
            'center_x': 0.1,
            'center_y': 0.1
        }]
        lens_lower = [{
            'theta_E': 0.,
            'gamma': 1.5,
            'center_x': -2,
            'center_y': -2,
            'e1': -0.4,
            'e2': -0.4
        }]
        lens_upper = [{
            'theta_E': 10.,
            'gamma': 2.5,
            'center_x': 2,
            'center_y': 2,
            'e1': 0.4,
            'e2': 0.4
        }]

        source_fixed = [{}]
        source_sigma = [{
            'R_sersic': 0.05,
            'n_sersic': 0.5,
            'center_x': 0.1,
            'center_y': 0.1,
            'e1': 0.1,
            'e2': 0.1
        }]
        source_lower = [{
            'R_sersic': 0.01,
            'n_sersic': 0.5,
            'center_x': -2,
            'center_y': -2,
            'e1': -0.4,
            'e2': -0.4
        }]
        source_upper = [{
            'R_sersic': 10,
            'n_sersic': 5.5,
            'center_x': 2,
            'center_y': 2,
            'e1': 0.4,
            'e2': 0.4
        }]

        lens_param = [
            kwargs_lens, lens_sigma, lens_fixed, lens_lower, lens_upper
        ]
        source_param = [
            kwargs_source, source_sigma, source_fixed, source_lower,
            source_upper
        ]

        kwargs_params = {
            'lens_model': lens_param,
            'source_model': source_param
        }

        kwargs_constraints = {}

        multi_band_list = [[kwargs_data, kwargs_psf, kwargs_numerics]]

        kwargs_data_joint = {
            'multi_band_list': multi_band_list,
            'multi_band_type': 'multi-linear'
        }

        kwargs_likelihood = {'source_marg': True}

        fittingSequence = FittingSequence(kwargs_data_joint, kwargs_model,
                                          kwargs_constraints,
                                          kwargs_likelihood, kwargs_params)

        fitting_list = []
        kwargs_zeus = {
            'sampler_type': 'ZEUS',
            'n_burn': 2,
            'n_run': 2,
            'walkerRatio': 4
        }

        fitting_list.append(['MCMC', kwargs_zeus])

        chain_list = fittingSequence.fit_sequence(fitting_list)
    def test_fitting_sequence(self):

        fittingSequence = FittingSequence(self.kwargs_data_joint,
                                          self.kwargs_model,
                                          self.kwargs_constraints,
                                          self.kwargs_likelihood,
                                          self.kwargs_params)

        kwargs_result = fittingSequence.best_fit(bijective=False)
        lens_temp = kwargs_result['kwargs_lens']
        npt.assert_almost_equal(lens_temp[0]['theta_E'],
                                self.kwargs_lens[0]['theta_E'],
                                decimal=2)

        logL = fittingSequence.best_fit_likelihood
        print(logL, 'test')
        #print(lens_temp, source_temp, lens_light_temp, ps_temp, cosmo_temp)
        assert logL < 0
        bic = fittingSequence.bic
        assert bic > 0
        #npt.assert_almost_equal(bic, 20000000220.29376, decimal=-4)

        #npt.assert_almost_equal(logL, -10000000061.792593, decimal=-4)

        n_p = 2
        n_i = 2
        fitting_list = []

        kwargs_pso = {
            'sigma_scale': 1,
            'n_particles': n_p,
            'n_iterations': n_i
        }
        fitting_list.append(['PSO', kwargs_pso])
        kwargs_align = {
            'lowerLimit': -0.1,
            'upperLimit': 0.1,
            'n_particles': 2,
            'n_iterations': 2
        }
        fitting_list.append(['align_images', kwargs_align])
        kwargs_psf_iter = {
            'num_iter': 2,
            'psf_iter_factor': 0.5,
            'stacking_method': 'mean',
            'new_procedure': False
        }
        fitting_list.append(['psf_iteration', kwargs_psf_iter])
        fitting_list.append(['restart', None])
        fitting_list.append(['fix_not_computed', {'free_bands': [True]}])
        n_sersic_overwrite = 4
        kwargs_update = {
            'lens_light_add_fixed': [[0, ['n_sersic'], [n_sersic_overwrite]]],
            'lens_light_remove_fixed': [[0, ['center_x']]],
            'change_source_lower_limit': [[0, ['n_sersic'], [0.1]]],
            'change_source_upper_limit': [[0, ['n_sersic'], [10]]]
        }
        fitting_list.append(['update_settings', kwargs_update])

        chain_list = fittingSequence.fit_sequence(fitting_list)
        lens_fixed, source_fixed, lens_light_fixed, ps_fixed, special_fixed, extinction_fixed = fittingSequence._updateManager.fixed_kwargs
        kwargs_result = fittingSequence.best_fit(bijective=False)
        npt.assert_almost_equal(kwargs_result['kwargs_lens'][0]['theta_E'],
                                self.kwargs_lens[0]['theta_E'],
                                decimal=1)
        npt.assert_almost_equal(
            fittingSequence._updateManager._lens_light_fixed[0]['n_sersic'],
            n_sersic_overwrite,
            decimal=8)
        npt.assert_almost_equal(lens_light_fixed[0]['n_sersic'], 4, decimal=-1)
        assert fittingSequence._updateManager._lower_kwargs[1][0][
            'n_sersic'] == 0.1
        assert fittingSequence._updateManager._upper_kwargs[1][0][
            'n_sersic'] == 10

        # test 'set_param_value' fitting sequence
        fitting_list = [[
            'set_param_value', {
                'lens': [[1, ['gamma1'], [0.013]]]
            }
        ], ['set_param_value', {
            'lens_light': [[0, ['center_x'], [0.009]]]
        }], ['set_param_value', {
            'source': [[0, ['n_sersic'], [2.993]]]
        }], ['set_param_value', {
            'ps': [[0, ['ra_source'], [0.007]]]
        }]]

        fittingSequence.fit_sequence(fitting_list)

        kwargs_set = fittingSequence._updateManager.parameter_state
        assert kwargs_set['kwargs_lens'][1]['gamma1'] == 0.013
        assert kwargs_set['kwargs_lens_light'][0]['center_x'] == 0.009
        assert kwargs_set['kwargs_source'][0]['n_sersic'] == 2.993
        assert kwargs_set['kwargs_ps'][0]['ra_source'] == 0.007
Example 14
def make_lensmodel(lens_info, theta_E, source_info, box_f):
    # lens data specifics
    lens_image = lens_info['image']
    psf_lens = lens_info['psf']
    background_rms = background_rms_image(5, lens_image)
    exposure_time = 100
    kwargs_data_lens = sim_util.data_configure_simple(len(lens_image),
                                                      lens_info['deltapix'],
                                                      exposure_time,
                                                      background_rms)
    kwargs_data_lens['image_data'] = lens_image
    data_class_lens = ImageData(**kwargs_data_lens)
    #PSF
    kwargs_psf_lens = {
        'psf_type': 'PIXEL',
        'pixel_size': lens_info['deltapix'],
        'kernel_point_source': psf_lens
    }
    psf_class_lens = PSF(**kwargs_psf_lens)
    # lens light model
    lens_light_model_list = ['SERSIC_ELLIPSE']
    lens_light_model_class = LightModel(light_model_list=lens_light_model_list)
    kwargs_model = {'lens_light_model_list': lens_light_model_list}
    kwargs_numerics_galfit = {'supersampling_factor': 1}
    kwargs_constraints = {}
    kwargs_likelihood = {'check_bounds': True}
    image_band = [kwargs_data_lens, kwargs_psf_lens, kwargs_numerics_galfit]
    multi_band_list = [image_band]
    kwargs_data_joint = {
        'multi_band_list': multi_band_list,
        'multi_band_type': 'multi-linear'
    }
    # Sersic component
    fixed_lens_light = [{}]
    kwargs_lens_light_init = [{
        'R_sersic': .1,
        'n_sersic': 4,
        'e1': 0,
        'e2': 0,
        'center_x': 0,
        'center_y': 0
    }]
    kwargs_lens_light_sigma = [{
        'n_sersic': 0.5,
        'R_sersic': 0.2,
        'e1': 0.1,
        'e2': 0.1,
        'center_x': 0.1,
        'center_y': 0.1
    }]
    kwargs_lower_lens_light = [{
        'e1': -0.5,
        'e2': -0.5,
        'R_sersic': 0.01,
        'n_sersic': 0.5,
        'center_x': -10,
        'center_y': -10
    }]
    kwargs_upper_lens_light = [{
        'e1': 0.5,
        'e2': 0.5,
        'R_sersic': 10,
        'n_sersic': 8,
        'center_x': 10,
        'center_y': 10
    }]
    lens_light_params = [
        kwargs_lens_light_init, kwargs_lens_light_sigma, fixed_lens_light,
        kwargs_lower_lens_light, kwargs_upper_lens_light
    ]
    kwargs_params = {'lens_light_model': lens_light_params}
    fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model,
                                  kwargs_constraints, kwargs_likelihood,
                                  kwargs_params)
    fitting_kwargs_list = [[
        'PSO', {
            'sigma_scale': 1.,
            'n_particles': 50,
            'n_iterations': 50
        }
    ]]
    chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)
    kwargs_result = fitting_seq.best_fit()
    modelPlot = ModelPlot(multi_band_list, kwargs_model, kwargs_result)
    # Lens light best result
    kwargs_light_lens = kwargs_result['kwargs_lens_light'][0]
    #Lens model
    kwargs_lens_list = [{
        'theta_E': theta_E,
        'e1': kwargs_light_lens['e1'],
        'e2': kwargs_light_lens['e2'],
        'center_x': kwargs_light_lens['center_x'],
        'center_y': kwargs_light_lens['center_y']
    }]
    lensModel = LensModel(['SIE'])
    lme = LensModelExtensions(lensModel)
    #random position for the source
    x_crit_list, y_crit_list = lme.critical_curve_tiling(
        kwargs_lens_list,
        compute_window=(len(source_info['image'])) * (source_info['deltapix']),
        start_scale=source_info['deltapix'],
        max_order=10)
    if len(x_crit_list) > 2 and len(y_crit_list) > 2:
        x_caustic_list, y_caustic_list = lensModel.ray_shooting(
            x_crit_list, y_crit_list, kwargs_lens_list)
        xsamp0 = np.arange(
            min(x_caustic_list) - min(x_caustic_list) * box_f[0],
            max(x_caustic_list) + max(x_caustic_list) * box_f[1], 0.1)
        xsamp = xsamp0[abs(xsamp0.round(1)) != 0.1]
        ysamp0 = np.arange(
            min(y_caustic_list) - min(y_caustic_list) * box_f[0],
            max(y_caustic_list) + max(y_caustic_list) * box_f[1], 0.1)
        ysamp = ysamp0[abs(ysamp0.round(1)) != 0.1]
        if len(xsamp) == 0 or len(ysamp) == 0:
            x_shift, y_shift = 0.15, 0.15  #arcseconds
        else:
            y_shift = rand.sample(list(ysamp), 1)[0]
            x_shift = rand.sample(list(xsamp), 1)[0]
    else:
        x_shift, y_shift = -0.15, 0.15  #arcseconds
        x_caustic_list = [0]
        y_caustic_list = [0]
    solver = LensEquationSolver(lensModel)
    theta_ra, theta_dec = solver.image_position_from_source(
        x_shift, y_shift, kwargs_lens_list)
    if len(theta_ra) <= 1:
        x_shift, y_shift = -0.2, -0.2  #arcseconds1
    if abs(x_shift) >= int(theta_E) or abs(y_shift) >= int(theta_E):
        x_shift, y_shift = 0.3, -0.3
        print('BLABLA')
    print('HERE',
          min(x_caustic_list) - min(x_caustic_list) * box_f[0],
          max(x_caustic_list) + max(x_caustic_list) * box_f[1],
          min(y_caustic_list) - min(y_caustic_list) * box_f[0],
          max(y_caustic_list) + max(y_caustic_list) * box_f[1])
    return {
        'lens_light_model_list': ['SERSIC_ELLIPSE'],
        'kwargs_light_lens': [kwargs_light_lens],
        'lens_light_model_class': lens_light_model_class,
        'kwargs_lens_list': kwargs_lens_list,
        'kwargs_data_lens': kwargs_data_lens,
        'source_shift': [x_shift, y_shift]
    }
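# Hedged usage sketch for make_lensmodel (all inputs below are hypothetical):
# lens_info/source_info carry the cutout image, its PSF and the pixel scale,
# theta_E is the Einstein radius in arcsec, and box_f scales the caustic box
# from which the source position is drawn.
# lens_info = {'image': lens_cutout, 'psf': lens_psf, 'deltapix': 0.05}
# source_info = {'image': source_cutout, 'deltapix': 0.05}
# lens_setup = make_lensmodel(lens_info, theta_E=1.2, source_info=source_info, box_f=[0.1, 0.1])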
def main():
    args = parse_args()
    test_cfg = TestConfig.from_file(args.test_config_file_path)
    train_val_cfg = TrainValConfig.from_file(test_cfg.train_val_config_file_path)
    baobab_cfg = get_baobab_config(test_cfg.data.test_dir)
    # Set device and default data type
    device = torch.device(test_cfg.device_type)
    if device.type == 'cuda':
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')
    seed_everything(test_cfg.global_seed)
    
    ############
    # Data I/O #
    ############
    test_data = XYCosmoData(test_cfg.data.test_dir, data_cfg=train_val_cfg.data)
    master_truth = test_data.cosmo_df
    master_truth = metadata_utils.add_qphi_columns(master_truth)
    master_truth = metadata_utils.add_gamma_psi_ext_columns(master_truth)
    if test_cfg.data.lens_indices is None:
        if args.lens_indices_path is None:
            # Test on all n_test lenses in the test set
            n_test = test_cfg.data.n_test 
            lens_range = range(n_test)
        else:
            # Test on the lens indices in a text file at the specified path
            lens_range = []
            with open(args.lens_indices_path, "r") as f:
                for line in f:
                    lens_range.append(int(line.strip()))
            n_test = len(lens_range)
            print("Performing H0 inference on {:d} specified lenses...".format(n_test))
    else:
        if args.lens_indices_path is None:
            # Test on the lens indices specified in the test config file
            lens_range = test_cfg.data.lens_indices
            n_test = len(lens_range)
            print("Performing H0 inference on {:d} specified lenses...".format(n_test))
        else:
            raise ValueError("Specific lens indices were specified in both the test config file and the command-line argument.")
    batch_size = max(lens_range) + 1
    # Output directory into which the H0 histograms and H0 samples will be saved
    out_dir = test_cfg.out_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print("Destination folder path: {:s}".format(out_dir))
    else:
        raise OSError("Destination folder already exists.")

    ######################
    # Load trained state #
    ######################
    # Instantiate loss function, to append to the MCMC objective as the prior
    orig_Y_cols = train_val_cfg.data.Y_cols
    # Instantiate MCMC parameter penalty function
    params_to_remove = ['lens_light_R_sersic'] #'src_light_R_sersic'] 
    mcmc_Y_cols = [col for col in orig_Y_cols if col not in params_to_remove]
    mcmc_Y_dim = len(mcmc_Y_cols)
    null_spread = True
    #init_D_dt = np.random.uniform(0.0, 10000.0, size=(batch_size, n_walkers, 1)) # FIXME: init H0 hardcoded

    kwargs_model = dict(lens_model_list=['PEMD', 'SHEAR'],
                        point_source_model_list=['SOURCE_POSITION'],
                        source_light_model_list=['SERSIC_ELLIPSE'])
    astro_sig = test_cfg.image_position_likelihood.sigma
    # Get H0 samples for each system
    if not test_cfg.time_delay_likelihood.baobab_time_delays:
        if 'abcd_ordering_i' not in master_truth:
            raise ValueError("If the time delay measurements were not generated using Baobab, the user must specify the order of image positions in which the time delays are listed, in order of increasing dec.")
    kwargs_lens_eq_solver = {'min_distance': 0.05, 'search_window': baobab_cfg.instrument.pixel_scale*baobab_cfg.image.num_pix, 'num_iter_max': 100}
    #n_walkers = test_cfg.numerics.mcmc.walkerRatio*(mcmc_Y_dim + 1) # BNN params + H0 times walker ratio
    #init_pos = np.tile(master_truth[mcmc_Y_cols].iloc[:batch_size].values[:, np.newaxis, :], [1, n_walkers, 1])
    #init_D_dt = np.random.uniform(0.0, 10000.0, size=(batch_size, n_walkers, 1))
    #print(init_pos.shape, init_D_dt.shape)

    total_progress = tqdm(total=n_test)
    # For each lens system...
    for i, lens_i in enumerate(lens_range):
        # Each lens gets a unique random state for td and vd measurement error realizations.
        rs_lens = np.random.RandomState(lens_i)
        ###########################
        # Relevant data and prior #
        ###########################
        data_i = master_truth.iloc[lens_i].copy()
        # Init values for the lens model params
        init_info = dict(zip(mcmc_Y_cols, data_i[mcmc_Y_cols].values)) # truth params
        lcdm = LCDM(z_lens=data_i['z_lens'], z_source=data_i['z_src'], flat=True)
        true_img_dec = np.array(literal_eval(data_i['y_image']))
        n_img = len(true_img_dec)
        true_td = np.array(literal_eval(data_i['true_td']))
        measured_td = true_td + rs_lens.randn(*true_td.shape)*test_cfg.error_model.time_delay_error
        measured_td_sig = test_cfg.time_delay_likelihood.sigma # np.ones(n_img - 1)*
        measured_img_dec = true_img_dec + rs_lens.randn(n_img)*astro_sig
        increasing_dec_i = np.argsort(true_img_dec) #np.argsort(measured_img_dec)
        measured_td = h0_utils.reorder_to_tdlmc(measured_td, increasing_dec_i, range(n_img)) # need to use measured dec to order
        measured_img_dec = h0_utils.reorder_to_tdlmc(measured_img_dec, increasing_dec_i, range(n_img))
        measured_td_wrt0 = measured_td[1:] - measured_td[0]   
        kwargs_data_joint = dict(time_delays_measured=measured_td_wrt0,
                                 time_delays_uncertainties=measured_td_sig,
                                 )

        #############################
        # Parameter init and bounds #
        #############################
        lens_kwargs = mcmc_utils.get_lens_kwargs(init_info, null_spread=null_spread)
        ps_kwargs = mcmc_utils.get_ps_kwargs_src_plane(init_info, astro_sig, null_spread=null_spread)
        src_light_kwargs = mcmc_utils.get_light_kwargs(init_info['src_light_R_sersic'], null_spread=null_spread)
        special_kwargs = mcmc_utils.get_special_kwargs(n_img, astro_sig, D_dt_sigma=2000, null_spread=null_spread) # image position offset and time delay distance, aka the "special" parameters
        kwargs_params = {'lens_model': lens_kwargs,
                         'point_source_model': ps_kwargs,
                         'source_model': src_light_kwargs,
                         'special': special_kwargs,}
        if test_cfg.numerics.solver_type == 'NONE':
            solver_type = 'NONE'
        else:
            solver_type = 'PROFILE_SHEAR' if n_img == 4 else 'CENTER'
        #solver_type = 'NONE'
        kwargs_constraints = {'num_point_source_list': [n_img],  
                              'Ddt_sampling': True,
                              'solver_type': solver_type,}

        kwargs_likelihood = {'time_delay_likelihood': True,
                             'sort_images_by_dec': True,
                             'prior_lens': [],
                             'prior_special': [],
                             'check_bounds': True, 
                             'check_matched_source_position': False,
                             'source_position_tolerance': 0.01,
                             'source_position_sigma': 0.01,
                             'source_position_likelihood': False,
                             'custom_logL_addition': None,
                             'kwargs_lens_eq_solver': kwargs_lens_eq_solver}

        ###########################
        # MCMC posterior sampling #
        ###########################
        fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, verbose=False, mpi=False)
        if i == 0:
            param_class = fitting_seq._updateManager.param_class
            n_params, param_class_Y_cols = param_class.num_param()
            #init_pos = mcmc_utils.reorder_to_param_class(mcmc_Y_cols, param_class_Y_cols, init_pos, init_D_dt)
        # MCMC sample from the post-processed BNN posterior jointly with cosmology
        lens_i_start_time = time.time()
        #test_cfg.numerics.mcmc.update(init_samples=init_pos[lens_i, :, :])
        fitting_kwargs_list_mcmc = [['MCMC', test_cfg.numerics.mcmc]]
        #with HiddenPrints():
        #try:
        chain_list_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list_mcmc)
        kwargs_result_mcmc = fitting_seq.best_fit()
        #except:
        #    print("lens {:d} skipped".format(lens_i))
        #    total_progress.update(1)
        #    continue
        lens_i_end_time = time.time()
        inference_time = (lens_i_end_time - lens_i_start_time)/60.0 # min

        #############################
        # Plotting the MCMC samples #
        #############################
        # sampler_type : 'EMCEE'
        # samples_mcmc : np.array of shape `[n_mcmc_eval, n_params]`
        # param_mcmc : list of str of length n_params, the parameter names
        sampler_type, samples_mcmc, param_mcmc, _  = chain_list_mcmc[0]
        new_samples_mcmc = mcmc_utils.postprocess_mcmc_chain(kwargs_result_mcmc, samples_mcmc, kwargs_model, lens_kwargs[2], ps_kwargs[2], src_light_kwargs[2], special_kwargs[2], kwargs_constraints)
        # Plot D_dt histogram
        D_dt_samples = new_samples_mcmc['D_dt'].values
        true_D_dt = lcdm.D_dt(H_0=data_i['H0'], Om0=0.3)
        data_i['D_dt'] = true_D_dt
        # Export D_dt samples for this lens
        lens_inference_dict = dict(
                                   D_dt_samples=D_dt_samples, # kappa_ext=0 for these samples
                                   inference_time=inference_time,
                                   true_D_dt=true_D_dt, 
                                   )
        lens_inference_dict_save_path = os.path.join(out_dir, 'D_dt_dict_{0:04d}.npy'.format(lens_i))
        np.save(lens_inference_dict_save_path, lens_inference_dict)
        # Optionally export the MCMC samples
        if test_cfg.export.mcmc_samples:
            mcmc_samples_path = os.path.join(out_dir, 'mcmc_samples_{0:04d}.csv'.format(lens_i))
            new_samples_mcmc.to_csv(mcmc_samples_path, index=None)
        # Optionally export the D_dt histogram
        if test_cfg.export.D_dt_histogram:
            cleaned_D_dt_samples = h0_utils.remove_outliers_from_lognormal(D_dt_samples, 3)
            _ = plotting_utils.plot_D_dt_histogram(cleaned_D_dt_samples, lens_i, true_D_dt, save_dir=out_dir)
        # Optionally export the plot of MCMC chain
        if test_cfg.export.mcmc_chain:
            mcmc_chain_path = os.path.join(out_dir, 'mcmc_chain_{0:04d}.png'.format(lens_i))
            plotting_utils.plot_mcmc_chain(chain_list_mcmc, mcmc_chain_path)
        # Optionally export posterior cornerplot of select lens model parameters with D_dt
        if test_cfg.export.mcmc_corner:
            mcmc_corner_path = os.path.join(out_dir, 'mcmc_corner_{0:04d}.png'.format(lens_i))
            plotting_utils.plot_mcmc_corner(new_samples_mcmc[test_cfg.export.mcmc_cols], data_i[test_cfg.export.mcmc_cols], test_cfg.export.mcmc_col_labels, mcmc_corner_path)
        total_progress.update(1)
        gc.collect()
    total_progress.close()
Example 16
	kwargs_lens_light_init = []
	kwargs_lens_light_sigma = []
	kwargs_lower_lens_light = []
	kwargs_upper_lens_light = []

# first Sersic component
	fixed_lens_light.append({})
	kwargs_lens_light_init.append({'R_sersic': .1, 'n_sersic': 4, 'e1': 0, 'e2': 0, 'center_x': 0, 'center_y': 0})
	kwargs_lens_light_sigma.append({'n_sersic': 0.5, 'R_sersic': 0.2, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1})
	kwargs_lower_lens_light.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -10, 'center_y': -10})
	kwargs_upper_lens_light.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 10, 'n_sersic': 8, 'center_x': 10, 'center_y': 10})

	lens_light_params = [kwargs_lens_light_init, kwargs_lens_light_sigma, fixed_lens_light, kwargs_lower_lens_light, kwargs_upper_lens_light]
	kwargs_params = {'lens_light_model': lens_light_params}

	fitting_seq = FittingSequence(multi_band_list, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params)

# n_particles is the number of PSO particles used to explore the parameter space, so with 5 particles the fit is evaluated at 5 positions per iteration
# more particles means the fit is tested in more places, so the solution is less likely to get stuck in a local minimum,
# but more particles is also more time consuming
# n_iterations is the number of PSO iterations; there is no trivial way to know how many are necessary for the fit to converge (see the quick/deep sketch below)
	fitting_kwargs_list = [{'fitting_routine': 'PSO', 'mpi': False, 'sigma_scale': 1., 'n_particles': 100, 'n_iterations': 90}]
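# Hedged sketch (illustrative values, same old-style format as above): a quick
# and a deeper PSO setting; more particles/iterations explore the parameter
# space more thoroughly at a higher computational cost.
# quick_pso = [{'fitting_routine': 'PSO', 'mpi': False, 'sigma_scale': 1., 'n_particles': 50, 'n_iterations': 50}]
# deep_pso = [{'fitting_routine': 'PSO', 'mpi': False, 'sigma_scale': 1., 'n_particles': 200, 'n_iterations': 200}]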
	
	lens_result, source_result, lens_light_result, ps_result, cosmo_result, chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)

	lensPlot = LensModelPlot(kwargs_data_mask, kwargs_psf, kwargs_numerics, kwargs_model, lens_result, source_result,
                             lens_light_result, ps_result, arrow_size=0.02, cmap_string="gist_heat")
# this contains the best-fit parameters from fitting the light of the galaxy (light profile, ellipticity and position)
	LLR = lens_light_result[0]
	
Example 17
def fit_galaxy(galaxy_im, psf_ave, psf_std=None, source_params=None, background_rms=0.04, pix_sz = 0.08,
            exp_time = 300., fix_n=None, image_plot = True, corner_plot=True,
            deep_seed = False, galaxy_msk=None, galaxy_std=None, flux_corner_plot = False,
            tag = None, no_MCMC= False, pltshow = 1, return_Chisq = False, dump_result = False, pso_diag=False):
    '''
    A quick fit of the galaxy image with (so far) a single elliptical Sersic profile. The input PSF noise map is optional.

    Parameters
    --------
        galaxy_im: An array of the galaxy image.
        psf_ave: The PSF image.
        psf_std: The PSF noise map, optional.
        source_params: The prior for the source. A default is provided.
        background_rms: default is 0.04
        exp_time: default is 300.
        deep_seed: if True, more MCMC steps will be performed.
        tag: The name tag used when saving the plots.

    Return
    --------
        Will output the fitted image (set image_plot = True), the corner plot and the flux corner plot.
        source_result, image_host, noise_map (plus reduced_Chisq if return_Chisq is True)

    To do
    --------

    '''
    # data specifics; these need to be set up based on the data situation
    background_rms = background_rms  #  background noise per pixel (Gaussian)
    exp_time = exp_time  #  exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
    numPix = len(galaxy_im)  #  cutout pixel size
    deltaPix = pix_sz
    if psf_ave is not None:
        psf_type = 'PIXEL'  # 'gaussian', 'pixel', 'NONE'
        kernel = psf_ave
    
#    if psf_std is not None:
#        kwargs_numerics = {'subgrid_res': 1, 'psf_error_map': True}     #Turn on the PSF error map
#    else: 
    kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False}
        
    if source_params is None:
        # here are the options for the host galaxy fitting
        fixed_source = []
        kwargs_source_init = []
        kwargs_source_sigma = []
        kwargs_lower_source = []
        kwargs_upper_source = []
        # Disk component, as modelled by an elliptical Sersic profile
        if fix_n is None:
            fixed_source.append({})  # no Sersic parameters are fixed for this component
            kwargs_source_init.append({'R_sersic': 0.3, 'n_sersic': 2., 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0.})
            kwargs_source_sigma.append({'n_sersic': 0.5, 'R_sersic': 0.1, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1})
            kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.01, 'n_sersic': 0.3, 'center_x': -10, 'center_y': -10})
            kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 3., 'n_sersic': 7., 'center_x': 10, 'center_y': 10})
        elif fix_n is not None:
            fixed_source.append({'n_sersic': fix_n})
            kwargs_source_init.append({'R_sersic': 0.3, 'n_sersic': fix_n, 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0.})
            kwargs_source_sigma.append({'n_sersic': 0.001, 'R_sersic': 0.1, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1})
            kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.01, 'n_sersic': fix_n, 'center_x': -10, 'center_y': -10})
            kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 3, 'n_sersic': fix_n, 'center_x': 10, 'center_y': 10})
        source_params = [kwargs_source_init, kwargs_source_sigma, fixed_source, kwargs_lower_source, kwargs_upper_source]
    else:
        source_params = source_params
    kwargs_params = {'source_model': source_params}
    
    #==============================================================================
    #Doing the QSO fitting 
    #==============================================================================
    kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, background_rms, inverse=True)
    data_class = ImageData(**kwargs_data)
    if psf_ave is not None:
        kwargs_psf = {'psf_type': psf_type, 'kernel_point_source': kernel}
    else:
        kwargs_psf =  {'psf_type': 'NONE'}
    
    psf_class = PSF(**kwargs_psf)
    data_class.update_data(galaxy_im)
    
    light_model_list = ['SERSIC_ELLIPSE'] * len(source_params[0])
    lightModel = LightModel(light_model_list=light_model_list)
    
    kwargs_model = { 'source_light_model_list': light_model_list}
    # numerical options and fitting sequences
    kwargs_constraints = {}
    
    kwargs_likelihood = {'check_bounds': True,  # check the bounds; if exceeded, return a penalty
                         'source_marg': False,  # in likelihood_module.LikelihoodModule -- whether to fully invert the covariance matrix for marginalization
                         'check_positive_flux': True,
                         'image_likelihood_mask_list': [galaxy_msk]
                         }
    kwargs_data['image_data'] = galaxy_im
    if galaxy_std is not None:
        kwargs_data['noise_map'] = galaxy_std
    if psf_std is not None:
        kwargs_psf['psf_error_map'] = psf_std
                  
    image_band = [kwargs_data, kwargs_psf, kwargs_numerics]
    multi_band_list = [image_band]
    
    kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'multi-linear'}  # 'single-band', 'multi-linear', 'joint-linear'
    fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params)
    
    if deep_seed == False:
        fitting_kwargs_list = [
            ['PSO', {'sigma_scale': 0.8, 'n_particles': 50, 'n_iterations': 50}],
            ['MCMC', {'n_burn': 10, 'n_run': 10, 'walkerRatio': 50, 'sigma_scale': .1}]
            ]            
    elif deep_seed == True:
         fitting_kwargs_list = [
            ['PSO', {'sigma_scale': 0.8, 'n_particles': 100, 'n_iterations': 80}],
            ['MCMC', {'n_burn': 10, 'n_run': 15, 'walkerRatio': 50, 'sigma_scale': .1}]
            ]
    elif deep_seed == 'very_deep':
         fitting_kwargs_list = [
            ['PSO', {'sigma_scale': 0.8, 'n_particles': 150, 'n_iterations': 150}],
            ['MCMC', {'n_burn': 10, 'n_run': 20, 'walkerRatio': 50, 'sigma_scale': .1}]
            ]
    if no_MCMC == True:
        fitting_kwargs_list = [fitting_kwargs_list[0],
                               ]        
    
    start_time = time.time()
    chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)
    kwargs_result = fitting_seq.best_fit()
    ps_result = kwargs_result['kwargs_ps']
    source_result = kwargs_result['kwargs_source']
    
    if no_MCMC == False:
        sampler_type, samples_mcmc, param_mcmc, dist_mcmc  = chain_list[1]      
    
#    chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)
#    lens_result, source_result, lens_light_result, ps_result, cosmo_temp = fitting_seq.best_fit()
    end_time = time.time()
    print(end_time - start_time, 'total time needed for computation')
    print('============ CONGRATULATION, YOUR JOB WAS SUCCESSFUL ================ ')
    # this is the linear inversion. The kwargs will be updated afterwards
    imageModel = ImageModel(data_class, psf_class, source_model_class=lightModel,kwargs_numerics=kwargs_numerics)
    imageLinearFit = ImageLinearFit(data_class=data_class, psf_class=psf_class,
                                       source_model_class=lightModel,
                                       kwargs_numerics=kwargs_numerics)    
    image_reconstructed, error_map, _, _ = imageLinearFit.image_linear_solve(kwargs_source=source_result, kwargs_ps=ps_result)
#    image_host = []   #!!! The linear_solver before and after could have different result for very faint sources.
#    for i in range(len(source_result)):
#        image_host_i = imageModel.source_surface_brightness(source_result,de_lensed=True,unconvolved=False, k=i)
#        print("image_host_i", source_result[i])
#        print("total flux", image_host_i.sum())
#        image_host.append(image_host_i)  
        
    # let's plot the output of the PSO minimizer
    modelPlot = ModelPlot(multi_band_list, kwargs_model, kwargs_result,
                          arrow_size=0.02, cmap_string="gist_heat", likelihood_mask_list=[galaxy_msk])  
    
    if pso_diag == True:
        f, axes = chain_plot.plot_chain_list(chain_list,0)
        if pltshow == 0:
            plt.close()
        else:
            plt.show()
                
    reduced_Chisq =  imageLinearFit.reduced_chi2(image_reconstructed, error_map)
    if image_plot:
        f, axes = plt.subplots(1, 3, figsize=(16, 16), sharex=False, sharey=False)
        modelPlot.data_plot(ax=axes[0])
        modelPlot.model_plot(ax=axes[1])
        modelPlot.normalized_residual_plot(ax=axes[2], v_min=-6, v_max=6)
        f.tight_layout()
        #f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)
        if tag is not None:
            f.savefig('{0}_fitted_image.pdf'.format(tag))
        if pltshow == 0:
            plt.close()
        else:
            plt.show()
    image_host = []    
    for i in range(len(source_result)):
        image_host_i = imageModel.source_surface_brightness(source_result,de_lensed=True,unconvolved=False, k=i)
#        print("image_host_i", source_result[i])
#        print("total flux", image_host_i.sum())
        image_host.append(image_host_i)  
        
    if corner_plot==True and no_MCMC==False:
        # here the (non-converged) MCMC chain of the non-linear parameters
        if not samples_mcmc == []:
           n, num_param = np.shape(samples_mcmc)
           plot = corner.corner(samples_mcmc, labels=param_mcmc, show_titles=True)
           if tag is not None:
               plot.savefig('{0}_para_corner.pdf'.format(tag))
           if pltshow == 0:
               plt.close()
           else:
               plt.show()
    if flux_corner_plot ==True and no_MCMC==False:
        param = Param(kwargs_model, kwargs_fixed_source=source_params[2], **kwargs_constraints)
        mcmc_new_list = []
        labels_new = ["host{0} flux".format(i) for i in range(len(source_params[0]))]
        for i in range(len(samples_mcmc)):
            kwargs_out = param.args2kwargs(samples_mcmc[i])
            kwargs_light_source_out = kwargs_out['kwargs_source']
            kwargs_ps_out =  kwargs_out['kwargs_ps']
            image_reconstructed, _, _, _ = imageLinearFit.image_linear_solve(kwargs_source=kwargs_light_source_out, kwargs_ps=kwargs_ps_out)
            fluxs = []
            for j in range(len(source_params[0])):
                image_j = imageModel.source_surface_brightness(kwargs_light_source_out,unconvolved= False, k=j)
                fluxs.append(np.sum(image_j))
            mcmc_new_list.append( fluxs )
            if int(i/1000) > int((i-1)/1000):
                print(len(samples_mcmc), "MCMC samples in total, finished translating:", i)
        plot = corner.corner(mcmc_new_list, labels=labels_new, show_titles=True)
        if tag is not None:
            plot.savefig('{0}_HOSTvsQSO_corner.pdf'.format(tag))
        if pltshow == 0:
            plt.close()
        else:
            plt.show() 

    if galaxy_std is None:
        noise_map = np.sqrt(data_class.C_D+np.abs(error_map))
    else:
        noise_map = np.sqrt(galaxy_std**2+np.abs(error_map))   
        
    if dump_result == True:
        if flux_corner_plot==True and no_MCMC==False:
            trans_paras = [source_params[2], mcmc_new_list, labels_new, 'source_params[2], mcmc_new_list, labels_new']
        else:
            trans_paras = []
        picklename= tag + '.pkl'
        best_fit = [source_result, image_host, 'source_result, image_host']
#        pso_fit = [chain_list, param_list, 'chain_list, param_list']
#        mcmc_fit = [samples_mcmc, param_mcmc, dist_mcmc, 'samples_mcmc, param_mcmc, dist_mcmc']
        chain_list_result = [chain_list, 'chain_list']
        pickle.dump([best_fit, chain_list_result, trans_paras], open(picklename, 'wb'))
        
    if return_Chisq == False:
        return source_result, image_host, noise_map
    elif return_Chisq == True:
        return source_result, image_host, noise_map, reduced_Chisq
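# Hedged usage sketch for fit_galaxy (array names are hypothetical): galaxy_im is a
# 2D cutout of the galaxy and psf_ave the corresponding PSF stamp. With the default
# priors this runs a PSO followed by a short MCMC and returns the Sersic parameters,
# the host-galaxy model image(s) and a noise map.
# source_result, image_host, noise_map = fit_galaxy(
#     galaxy_im, psf_ave, pix_sz=0.08, exp_time=300., background_rms=0.04,
#     image_plot=True, corner_plot=False, tag='example')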
Example 18
                              }

kwargs_likelihood = {'check_bounds': True,
                     'force_no_add_image': False,
                     'source_marg': False,
                     'image_position_uncertainty': 0.004,
                     'check_matched_source_position': True,
                     'source_position_tolerance': 0.001,
                     'time_delay_likelihood': True,
                             }
kwargs_numerics = {'supersampling_factor': 1}
image_band = [kwargs_data, kwargs_psf, kwargs_numerics]
multi_band_list = [image_band]
kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'multi-linear',
                    'time_delays_measured': delta_t["delta_t"].to_numpy(),
                    'time_delays_uncertainties': delta_t["sigma"].to_numpy(),}

from lenstronomy.Workflow.fitting_sequence import FittingSequence
fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params)

fitting_kwargs_list = [
    ['PSO', {'sigma_scale': .1, 'n_particles': 200, 'n_iterations': 200}],
        ['MCMC', {'n_burn': 100, 'n_run': 100, 'walkerRatio': 10, 'sigma_scale': .1}]
]

start_time = time.time()
chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)
kwargs_result = fitting_seq.best_fit()
end_time = time.time()
print(end_time - start_time, 'total time needed for computation')
print('============ CONGRATULATION, YOUR JOB WAS SUCCESSFUL ================ ')
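
# A hedged sketch (not part of the original snippet) of how the sampler output
# could be inspected afterwards; it assumes the MCMC run is the last entry of
# chain_list, following the same unpacking used in the fit_qso() example below.
sampler_type, samples_mcmc, param_mcmc, dist_mcmc = chain_list[-1]
print(sampler_type, 'returned', len(samples_mcmc), 'samples of', len(param_mcmc), 'parameters')
print('best-fit lens kwargs:', kwargs_result['kwargs_lens'])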
Esempio n. 19
0
def fit_qso(QSO_im, psf_ave, psf_std=None, source_params=None,ps_param=None, background_rms=0.04, pix_sz = 0.168,
            exp_time = 300., fix_n=None, image_plot = True, corner_plot=True, supersampling_factor = 2, 
            flux_ratio_plot=False, deep_seed = False, fixcenter = False, QSO_msk=None, QSO_std=None,
            tag = None, no_MCMC= False, pltshow = 1, return_Chisq = False, dump_result = False, pso_diag=False):
    '''
    A quick fit for the QSO image with (so far) a single Sersic profile + one PSF. The input PSF noise is optional.
    
    Parameters
    --------
        QSO_im: An array of the QSO image.
        psf_ave: The PSF image.
        psf_std: The PSF noise map, optional.
        source_params: The prior for the source. A default is provided. If [], no Sersic light is fitted.
        background_rms: background noise per pixel; default 0.04.
        exp_time: exposure time; default 300.
        deep_seed: if True, more PSO/MCMC steps will be performed.
        tag: The name tag used to save the plots.
            
    Returns
    --------
        Plots the fitted image (set image_plot = True), the corner plot and the flux-ratio plot, and returns
        source_result, ps_result, image_ps, image_host, noise_map (plus reduced_Chisq if return_Chisq = True).
    '''
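    # Hypothetical usage sketch (the image and PSF arrays below are
    # placeholders, not defined in this module):
    #   source_result, ps_result, image_ps, image_host, noise_map = fit_qso(
    #       qso_image, psf_ave=psf_image, pix_sz=0.168, exp_time=300.,
    #       tag='my_qso', no_MCMC=True)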
    # data specifics need to set up based on the data situation
    background_rms = background_rms  #  background noise per pixel (Gaussian)
    exp_time = exp_time  #  exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
    numPix = len(QSO_im)  #  cutout pixel size
    deltaPix = pix_sz
    psf_type = 'PIXEL'  # 'GAUSSIAN', 'PIXEL', 'NONE'
    kernel = psf_ave

    kwargs_numerics = {'supersampling_factor': supersampling_factor, 'supersampling_convolution': False} 
    
    if source_params is None:
        # here are the options for the host galaxy fitting
        fixed_source = []
        kwargs_source_init = []
        kwargs_source_sigma = []
        kwargs_lower_source = []
        kwargs_upper_source = []
        
        if fix_n is None:
            fixed_source.append({})  # no Sersic parameter is fixed; n_sersic is left free
            kwargs_source_init.append({'R_sersic': 0.3, 'n_sersic': 2., 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0.})
            kwargs_source_sigma.append({'n_sersic': 0.5, 'R_sersic': 0.5, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1})
            kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.1, 'n_sersic': 0.3, 'center_x': -10, 'center_y': -10})
            kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 3., 'n_sersic': 7., 'center_x': 10, 'center_y': 10})
        else:
            fixed_source.append({'n_sersic': fix_n})
            kwargs_source_init.append({'R_sersic': 0.3, 'n_sersic': fix_n, 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0.})
            kwargs_source_sigma.append({'n_sersic': 0.001, 'R_sersic': 0.5, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1})
            kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.1, 'n_sersic': fix_n, 'center_x': -10, 'center_y': -10})
            kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 3, 'n_sersic': fix_n, 'center_x': 10, 'center_y': 10})
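        # lenstronomy parameter convention: [kwargs_init, kwargs_sigma,
        # kwargs_fixed, kwargs_lower, kwargs_upper]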
        source_params = [kwargs_source_init, kwargs_source_sigma, fixed_source, kwargs_lower_source, kwargs_upper_source]
    else:
        source_params = source_params
    
    if ps_param is None:
        center_x = 0.0
        center_y = 0.0
        point_amp = QSO_im.sum()/2.
        fixed_ps = [{}]
        kwargs_ps = [{'ra_image': [center_x], 'dec_image': [center_y], 'point_amp': [point_amp]}]
        kwargs_ps_init = kwargs_ps
        kwargs_ps_sigma = [{'ra_image': [0.05], 'dec_image': [0.05]}]
        kwargs_lower_ps = [{'ra_image': [-0.6], 'dec_image': [-0.6]}]
        kwargs_upper_ps = [{'ra_image': [0.6], 'dec_image': [0.6]}]
        ps_param = [kwargs_ps_init, kwargs_ps_sigma, fixed_ps, kwargs_lower_ps, kwargs_upper_ps]
    else:
        ps_param = ps_param
    
    #==============================================================================
    #Doing the QSO fitting 
    #==============================================================================
    kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, background_rms, inverse=True)
    data_class = ImageData(**kwargs_data)
    kwargs_psf = {'psf_type': psf_type, 'kernel_point_source': kernel}
    psf_class = PSF(**kwargs_psf)
    data_class.update_data(QSO_im)
    
    point_source_list = ['UNLENSED'] * len(ps_param[0])
    pointSource = PointSource(point_source_type_list=point_source_list)
    
    if fixcenter == False:
        kwargs_constraints = {'num_point_source_list': [1] * len(ps_param[0])
                              }
    elif fixcenter == True:
        kwargs_constraints = {'joint_source_with_point_source': [[i, i] for i in range(len(ps_param[0]))],
                              'num_point_source_list': [1] * len(ps_param[0])
                              }
    
    
    if source_params == []:   #fitting image as Point source only.
        kwargs_params = {'point_source_model': ps_param}
        lightModel = None
        kwargs_model = {'point_source_model_list': point_source_list }
        imageModel = ImageModel(data_class, psf_class, point_source_class=pointSource, kwargs_numerics=kwargs_numerics)
        kwargs_likelihood = {'check_bounds': True,  # check the parameter bounds; if exceeded, return a "penalty"
                             'image_likelihood_mask_list': [QSO_msk]
                     }
    elif source_params != []:
        kwargs_params = {'source_model': source_params,
                 'point_source_model': ps_param}

        light_model_list = ['SERSIC_ELLIPSE'] * len(source_params[0])
        lightModel = LightModel(light_model_list=light_model_list)
        kwargs_model = { 'source_light_model_list': light_model_list,
                        'point_source_model_list': point_source_list
                        }
        imageModel = ImageModel(data_class, psf_class, source_model_class=lightModel,
                                point_source_class=pointSource, kwargs_numerics=kwargs_numerics)
        # numerical options and fitting sequences
        kwargs_likelihood = {'check_bounds': True,  # check the parameter bounds; if exceeded, return a "penalty"
                             'source_marg': False,  # in likelihood_module.LikelihoodModule -- whether to fully invert the covariance matrix for marginalization
                              'check_positive_flux': True, 
                              'image_likelihood_mask_list': [QSO_msk]
                             }
    
    kwargs_data['image_data'] = QSO_im
    if QSO_std is not None:
        kwargs_data['noise_map'] = QSO_std
    
    if psf_std is not None:
        kwargs_psf['psf_error_map'] = psf_std
    image_band = [kwargs_data, kwargs_psf, kwargs_numerics]
    multi_band_list = [image_band]

    kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'multi-linear'}  # 'single-band', 'multi-linear', 'joint-linear'
    fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params)
    
    if deep_seed == False:
        fitting_kwargs_list = [
             ['PSO', {'sigma_scale': 0.8, 'n_particles': 100, 'n_iterations': 60}],
             ['MCMC', {'n_burn': 10, 'n_run': 10, 'walkerRatio': 50, 'sigma_scale': .1}]
            ]
    elif deep_seed == True:
         fitting_kwargs_list = [
             ['PSO', {'sigma_scale': 0.8, 'n_particles': 250, 'n_iterations': 250}],
             ['MCMC', {'n_burn': 100, 'n_run': 200, 'walkerRatio': 10, 'sigma_scale': .1}]
            ]
    if no_MCMC == True:
        fitting_kwargs_list = [fitting_kwargs_list[0],
                               ]        

    start_time = time.time()
    chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)
    kwargs_result = fitting_seq.best_fit()
    ps_result = kwargs_result['kwargs_ps']
    source_result = kwargs_result['kwargs_source']
    if no_MCMC == False:
        sampler_type, samples_mcmc, param_mcmc, dist_mcmc  = chain_list[1]    
    
    end_time = time.time()
    print(end_time - start_time, 'total time needed for computation')
    print('============ CONGRATULATION, YOUR JOB WAS SUCCESSFUL ================ ')
    imageLinearFit = ImageLinearFit(data_class=data_class, psf_class=psf_class,
                                    source_model_class=lightModel,
                                    point_source_class=pointSource, 
                                    kwargs_numerics=kwargs_numerics)    
    image_reconstructed, error_map, _, _ = imageLinearFit.image_linear_solve(kwargs_source=source_result, kwargs_ps=ps_result)
    # this is the linear inversion. The kwargs will be updated afterwards
    modelPlot = ModelPlot(multi_band_list, kwargs_model, kwargs_result,
                          arrow_size=0.02, cmap_string="gist_heat", likelihood_mask_list=[QSO_msk])
    image_host = []  #!!! The linear solver before and after ModelPlot could give different results for very faint sources.
    for i in range(len(source_result)):
        image_host.append(imageModel.source_surface_brightness(source_result, de_lensed=True,unconvolved=False,k=i))
    
    image_ps = []
    for i in range(len(ps_result)):
        image_ps.append(imageModel.point_source(ps_result, k = i))
    
    if pso_diag == True:
        f, axes = chain_plot.plot_chain_list(chain_list,0)
        if pltshow == 0:
            plt.close()
        else:
            plt.show()

    # compute the reduced chi^2 of the reconstruction and plot the fitting results
    reduced_Chisq = imageLinearFit.reduced_chi2(image_reconstructed, error_map)
    if image_plot:
        f, axes = plt.subplots(3, 3, figsize=(16, 16), sharex=False, sharey=False)
        modelPlot.data_plot(ax=axes[0,0], text="Data")
        modelPlot.model_plot(ax=axes[0,1])
        modelPlot.normalized_residual_plot(ax=axes[0,2], v_min=-6, v_max=6)
        
        modelPlot.decomposition_plot(ax=axes[1,0], text='Host galaxy', source_add=True, unconvolved=True)
        modelPlot.decomposition_plot(ax=axes[1,1], text='Host galaxy convolved', source_add=True)
        modelPlot.decomposition_plot(ax=axes[1,2], text='All components convolved', source_add=True, lens_light_add=True, point_source_add=True)
        
        modelPlot.subtract_from_data_plot(ax=axes[2,0], text='Data - Point Source', point_source_add=True)
        modelPlot.subtract_from_data_plot(ax=axes[2,1], text='Data - host galaxy', source_add=True)
        modelPlot.subtract_from_data_plot(ax=axes[2,2], text='Data - host galaxy - Point Source', source_add=True, point_source_add=True)
        
        f.tight_layout()
        #f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)
        if tag is not None:
            f.savefig('{0}_fitted_image.pdf'.format(tag))
        if pltshow == 0:
            plt.close()
        else:
            plt.show()
        
    if corner_plot==True and no_MCMC==False:
        # here the (non-converged) MCMC chain of the non-linear parameters
        if not samples_mcmc == []:
           n, num_param = np.shape(samples_mcmc)
           plot = corner.corner(samples_mcmc, labels=param_mcmc, show_titles=True)
           if tag is not None:
               plot.savefig('{0}_para_corner.pdf'.format(tag))
           plt.close()               
           # if pltshow == 0:
           #     plt.close()
           # else:
           #     plt.show()
        
    if flux_ratio_plot==True and no_MCMC==False:
        param = Param(kwargs_model, kwargs_fixed_source=source_params[2], kwargs_fixed_ps=ps_param[2], **kwargs_constraints)
        mcmc_new_list = []
        if len(ps_param[2]) == 1:
            labels_new = ["Quasar flux"] +  ["host{0} flux".format(i) for i in range(len(source_params[0]))]
        else:
            labels_new = ["Quasar{0} flux".format(i) for i in range(len(ps_param[2]))] +  ["host{0} flux".format(i) for i in range(len(source_params[0]))]
        if len(samples_mcmc) > 10000:
            trans_steps = [len(samples_mcmc)-10000, len(samples_mcmc)]
        else:
            trans_steps = [0, len(samples_mcmc)]
        for i in range(trans_steps[0], trans_steps[1]):
            kwargs_out = param.args2kwargs(samples_mcmc[i])
            kwargs_light_source_out = kwargs_out['kwargs_source']
            kwargs_ps_out =  kwargs_out['kwargs_ps']
            image_reconstructed, _, _, _ = imageLinearFit.image_linear_solve(kwargs_source=kwargs_light_source_out, kwargs_ps=kwargs_ps_out)
            flux_quasar = []
            if len(ps_param[0]) == 1:
                image_ps_j = imageModel.point_source(kwargs_ps_out)
                flux_quasar.append(np.sum(image_ps_j))  
            else:    
                for j in range(len(ps_param[0])):
                    image_ps_j = imageModel.point_source(kwargs_ps_out, k=j)
                    flux_quasar.append(np.sum(image_ps_j))
            fluxs = []
            for j in range(len(source_params[0])):
                image_j = imageModel.source_surface_brightness(kwargs_light_source_out,unconvolved= False, k=j)
                fluxs.append(np.sum(image_j))
            mcmc_new_list.append(flux_quasar + fluxs )
            if int(i/1000) > int((i-1)/1000) :
                print(len(samples_mcmc), "MCMC samples in total; finished translating:", i)
        plot = corner.corner(mcmc_new_list, labels=labels_new, show_titles=True)
        if tag is not None:
            plot.savefig('{0}_HOSTvsQSO_corner.pdf'.format(tag))
        if pltshow == 0:
            plt.close()
        else:
            plt.show()
    if QSO_std is None:
        noise_map = np.sqrt(data_class.C_D+np.abs(error_map))
    else:
        noise_map = np.sqrt(QSO_std**2+np.abs(error_map))
    if dump_result == True:
        if flux_ratio_plot==True and no_MCMC==False:
            trans_paras = [mcmc_new_list, labels_new, 'mcmc_new_list, labels_new']
        else:
            trans_paras = []
        picklename= tag + '.pkl'
        best_fit = [source_result, image_host, ps_result, image_ps,'source_result, image_host, ps_result, image_ps']
        chain_list_result = [chain_list, 'chain_list']
        kwargs_fixed_source=source_params[2]
        kwargs_fixed_ps=ps_param[2]
        classes = data_class, psf_class, lightModel, pointSource
        material = multi_band_list, kwargs_model, kwargs_result, QSO_msk, kwargs_fixed_source, kwargs_fixed_ps, kwargs_constraints, kwargs_numerics, classes
        pickle.dump([best_fit, chain_list_result, trans_paras, material], open(picklename, 'wb'))
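        # A hedged sketch (not in the original) of how the pickle could be read back:
        #   best_fit, chain_list_result, trans_paras, material = pickle.load(open(tag + '.pkl', 'rb'))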
    if return_Chisq == False:
        return source_result, ps_result, image_ps, image_host, noise_map
    elif return_Chisq == True:
        return source_result, ps_result, image_ps, image_host, noise_map, reduced_Chisq
Esempio n. 20
0
def fit_qso_multiband(QSO_im_list, psf_ave_list, psf_std_list=None, source_params=None,ps_param=None,
                      background_rms_list=[0.04]*5, pix_sz = 0.168,
                      exp_time = 300., fix_n=None, image_plot = True, corner_plot=True,
                      flux_ratio_plot=True, deep_seed = False, fixcenter = False, QSO_msk_list=None,
                      QSO_std_list=None, tag = None, no_MCMC= False, pltshow = 1, new_band_seq=None):
    '''
    A quick multi-band fit for the QSO images with (so far) a single Sersic profile + one PSF per band. The input PSF noise is optional.
    
    Parameters
    --------
        QSO_im_list: A list of QSO image arrays, one per band.
        psf_ave_list: A list of PSF images, one per band.
        psf_std_list: A list of PSF noise maps, optional.
        source_params: The prior for the source. A default is provided.
        background_rms_list: background noise per pixel for each band; default 0.04.
        exp_time: exposure time; default 300.
        deep_seed: if True, more PSO/MCMC steps will be performed.
        tag: The name tag used to save the plots.
            
    Returns
    --------
        Plots the fitted images (set image_plot = True), the corner plot and the flux-ratio plot, and returns
        source_result_list, ps_result_list, image_ps_list, image_host_list, errp_list, shift_RADEC_list, fitting_seq
    '''
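    # Hypothetical usage sketch (the per-band image and PSF lists below are
    # placeholders, not defined in this module):
    #   results = fit_qso_multiband([img_g, img_r], [psf_g, psf_r],
    #                               background_rms_list=[0.04, 0.04],
    #                               tag='my_qso', no_MCMC=True)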
    # data specifics need to set up based on the data situation
    background_rms_list = background_rms_list  #  background noise per pixel (Gaussian)
    exp_time = exp_time  #  exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
    numPix = len(QSO_im_list[0])  #  cutout pixel size
    deltaPix = pix_sz
    psf_type = 'PIXEL'  # 'GAUSSIAN', 'PIXEL', 'NONE'
    kernel_list = psf_ave_list
    if new_band_seq is None:
        new_band_seq= range(len(QSO_im_list))
    
#    if psf_std_list is not None:
#        kwargs_numerics_list = [{'subgrid_res': 1, 'psf_subgrid': False, 'psf_error_map': True}] * len(QSO_im_list)     #Turn on the PSF error map
#    else: 
    kwargs_numerics_list = [{'supersampling_factor': 1, 'supersampling_convolution': False}] * len(QSO_im_list)
    
    if source_params is None:
        # here are the options for the host galaxy fitting
        fixed_source = []
        kwargs_source_init = []
        kwargs_source_sigma = []
        kwargs_lower_source = []
        kwargs_upper_source = []
        
        # Disk component, as modelled by an elliptical Sersic profile
        if fix_n is None:
            fixed_source.append({})  # no Sersic parameter is fixed; n_sersic is left free
            kwargs_source_init.append({'R_sersic': 0.3, 'n_sersic': 2., 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0.})
            kwargs_source_sigma.append({'n_sersic': 0.5, 'R_sersic': 0.5, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1})
            kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.1, 'n_sersic': 0.3, 'center_x': -10, 'center_y': -10})
            kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 3., 'n_sersic': 7., 'center_x': 10, 'center_y': 10})
        else:
            fixed_source.append({'n_sersic': fix_n})
            kwargs_source_init.append({'R_sersic': 0.3, 'n_sersic': fix_n, 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0.})
            kwargs_source_sigma.append({'n_sersic': 0.001, 'R_sersic': 0.5, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1})
            kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.1, 'n_sersic': fix_n, 'center_x': -10, 'center_y': -10})
            kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 3, 'n_sersic': fix_n, 'center_x': 10, 'center_y': 10})
        source_params = [kwargs_source_init, kwargs_source_sigma, fixed_source, kwargs_lower_source, kwargs_upper_source]
    else:
        source_params = source_params
    
    if ps_param is None:
        center_x = 0.0
        center_y = 0.0
        point_amp = QSO_im_list[0].sum()/2.
        fixed_ps = [{}]
        kwargs_ps = [{'ra_image': [center_x], 'dec_image': [center_y], 'point_amp': [point_amp]}]
        kwargs_ps_init = kwargs_ps
        kwargs_ps_sigma = [{'ra_image': [0.01], 'dec_image': [0.01]}]
        kwargs_lower_ps = [{'ra_image': [-10], 'dec_image': [-10]}]
        kwargs_upper_ps = [{'ra_image': [10], 'dec_image': [10]}]
        ps_param = [kwargs_ps_init, kwargs_ps_sigma, fixed_ps, kwargs_lower_ps, kwargs_upper_ps]
    else:
        ps_param = ps_param
    
    kwargs_params = {'source_model': source_params,
                     'point_source_model': ps_param}
    
    #==============================================================================
    #Doing the QSO fitting 
    #==============================================================================
    kwargs_data_list, data_class_list = [], []
    for i in range(len(QSO_im_list)):
        kwargs_data_i = sim_util.data_configure_simple(numPix, deltaPix, exp_time, background_rms_list[i], inverse=True)
        kwargs_data_list.append(kwargs_data_i)
        data_class_list.append(ImageData(**kwargs_data_i))
    kwargs_psf_list = []
    psf_class_list = []
    for i in range(len(QSO_im_list)):
        kwargs_psf_i = {'psf_type': psf_type, 'kernel_point_source': kernel_list[i]}
        kwargs_psf_list.append(kwargs_psf_i)
        psf_class_list.append(PSF(**kwargs_psf_i))
        data_class_list[i].update_data(QSO_im_list[i])
    
    light_model_list = ['SERSIC_ELLIPSE'] * len(source_params[0])
    lightModel = LightModel(light_model_list=light_model_list)
    point_source_list = ['UNLENSED']
    pointSource = PointSource(point_source_type_list=point_source_list)
    
    imageModel_list = []
    for i in range(len(QSO_im_list)):
        kwargs_data_list[i]['image_data'] = QSO_im_list[i]
#        if QSO_msk_list is not None:
#            kwargs_numerics_list[i]['mask'] = QSO_msk_list[i]
        if QSO_std_list is not None:
            kwargs_data_list[i]['noise_map'] = QSO_std_list[i]
#        if psf_std_list is not None:
#            kwargs_psf_list[i]['psf_error_map'] = psf_std_list[i]
    
    image_band_list = []
    for i in range(len(QSO_im_list)):
        imageModel_list.append(ImageModel(data_class_list[i], psf_class_list[i], source_model_class=lightModel,
                                        point_source_class=pointSource, kwargs_numerics=kwargs_numerics_list[i]))
                  
        
        image_band_list.append([kwargs_data_list[i], kwargs_psf_list[i], kwargs_numerics_list[i]])
    multi_band_list = [image_band_list[i] for i in range(len(QSO_im_list))]
    
    # numerical options and fitting sequences
    
    kwargs_model = { 'source_light_model_list': light_model_list,
                    'point_source_model_list': point_source_list
                    }
    
    if fixcenter == False:
        kwargs_constraints = {'num_point_source_list': [1]
                              }
    elif fixcenter == True:
        kwargs_constraints = {'joint_source_with_point_source': [[0, 0]],
                              'num_point_source_list': [1]
                              }
    
    kwargs_likelihood = {'check_bounds': True,  # check the parameter bounds; if exceeded, return a "penalty"
                         'source_marg': False,  # in likelihood_module.LikelihoodModule -- whether to fully invert the covariance matrix for marginalization
                         'check_positive_flux': True,
                         'image_likelihood_mask_list': QSO_msk_list  # one mask per band (or None)
                         }
    
#    mpi = False  # MPI possible, but not supported through that notebook.
    # The Params for the fitting. kwargs_init: initial input. kwargs_sigma: The parameter uncertainty. kwargs_fixed: fixed parameters;
    #kwargs_lower,kwargs_upper: Lower and upper limits.
    kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'multi-linear'}  # 'single-band', 'multi-linear', 'joint-linear'
    fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params)
    
    if deep_seed == False:
        fitting_kwargs_list = [
            ['PSO', {'sigma_scale': 0.8, 'n_particles': 80, 'n_iterations': 60, 'compute_bands': [True]+[False]*(len(QSO_im_list)-1)}],
            ['align_images', {'n_particles': 10, 'n_iterations': 10, 'compute_bands': [False]+[True]*(len(QSO_im_list)-1)}],
            ['PSO', {'sigma_scale': 0.8, 'n_particles': 100, 'n_iterations': 200, 'compute_bands': [True]*len(QSO_im_list)}],
            ['MCMC', {'n_burn': 10, 'n_run': 20, 'walkerRatio': 50, 'sigma_scale': .1}]              
            ]
    elif deep_seed == True:
         fitting_kwargs_list = [
            ['PSO', {'sigma_scale': 0.8, 'n_particles': 150, 'n_iterations': 60, 'compute_bands': [True]+[False]*(len(QSO_im_list)-1)}],
            ['align_images', {'n_particles': 20, 'n_iterations': 20, 'compute_bands': [False]+[True]*(len(QSO_im_list)-1)}],
            ['PSO', {'sigma_scale': 0.8, 'n_particles': 150, 'n_iterations': 200, 'compute_bands': [True]*len(QSO_im_list)}],
            ['MCMC', {'n_burn': 20, 'n_run': 40, 'walkerRatio': 50, 'sigma_scale': .1}]                 
            ]
    if no_MCMC == True:
        del fitting_kwargs_list[-1]
    
    start_time = time.time()
#    lens_result, source_result, lens_light_result, ps_result, cosmo_temp, chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)
    chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)
    lens_result, source_result, lens_light_result, ps_result, cosmo_temp = fitting_seq.best_fit()    
    end_time = time.time()
    print(end_time - start_time, 'total time needed for computation')
    print('============ CONGRATULATION, YOUR JOB WAS SUCCESSFUL ================ ')
    source_result_list, ps_result_list = [], []
    image_reconstructed_list, error_map_list, image_ps_list, image_host_list, shift_RADEC_list=[], [], [], [],[]
    imageLinearFit_list = []
    for k in range(len(QSO_im_list)):
    # this is the linear inversion. The kwargs will be updated afterwards
        imageLinearFit_k = ImageLinearFit(data_class_list[k], psf_class_list[k], source_model_class=lightModel,
                                        point_source_class=pointSource, kwargs_numerics=kwargs_numerics_list[k])  
        image_reconstructed_k, error_map_k, _, _ = imageLinearFit_k.image_linear_solve(kwargs_source=source_result, kwargs_ps=ps_result)
        imageLinearFit_list.append(imageLinearFit_k) 
        
        [kwargs_data_k, kwargs_psf_k, kwargs_numerics_k] = fitting_seq.multi_band_list[k]
#        data_class_k = data_class_list[k] #ImageData(**kwargs_data_k)
#        psf_class_k = psf_class_list[k] #PSF(**kwargs_psf_k)
#        imageModel_k = ImageModel(data_class_k, psf_class_k, source_model_class=lightModel,
#                                point_source_class=pointSource, kwargs_numerics=kwargs_numerics_list[k])
        imageModel_k = imageModel_list[k]
        modelPlot = ModelPlot(multi_band_list[k], kwargs_model, lens_result, source_result,
                                 lens_light_result, ps_result, arrow_size=0.02, cmap_string="gist_heat", likelihood_mask=QSO_im_list[k])
        print("source_result", 'for', "k", source_result)
        image_host_k = []
        for i in range(len(source_result)):
            image_host_k.append(imageModel_list[k].source_surface_brightness(source_result,de_lensed=True,unconvolved=False, k=i))
        image_ps_k = imageModel_k.point_source(ps_result)
        # let's plot the output of the PSO minimizer
        
        image_reconstructed_list.append(image_reconstructed_k)
        source_result_list.append(source_result)
        ps_result_list.append(ps_result)
        error_map_list.append(error_map_k)
        image_ps_list.append(image_ps_k)
        image_host_list.append(image_host_k)
        if 'ra_shift' in fitting_seq.multi_band_list[k][0].keys():
            shift_RADEC_list.append([fitting_seq.multi_band_list[k][0]['ra_shift'], fitting_seq.multi_band_list[k][0]['dec_shift']])
        else:
            shift_RADEC_list.append([0,0])
        if image_plot:
            f, axes = plt.subplots(3, 3, figsize=(16, 16), sharex=False, sharey=False)
            modelPlot.data_plot(ax=axes[0,0], text="Data")
            modelPlot.model_plot(ax=axes[0,1])
            modelPlot.normalized_residual_plot(ax=axes[0,2], v_min=-6, v_max=6)
            
            modelPlot.decomposition_plot(ax=axes[1,0], text='Host galaxy', source_add=True, unconvolved=True)
            modelPlot.decomposition_plot(ax=axes[1,1], text='Host galaxy convolved', source_add=True)
            modelPlot.decomposition_plot(ax=axes[1,2], text='All components convolved', source_add=True, lens_light_add=True, point_source_add=True)
            
            modelPlot.subtract_from_data_plot(ax=axes[2,0], text='Data - Point Source', point_source_add=True)
            modelPlot.subtract_from_data_plot(ax=axes[2,1], text='Data - host galaxy', source_add=True)
            modelPlot.subtract_from_data_plot(ax=axes[2,2], text='Data - host galaxy - Point Source', source_add=True, point_source_add=True)
            f.tight_layout()
            if tag is not None:
                f.savefig('{0}_fitted_image_band{1}.pdf'.format(tag,new_band_seq[k]))
            if pltshow == 0:
                plt.close()
            else:
                plt.show()
            
            if corner_plot==True and no_MCMC==False and k ==0:
                # here the (non-converged) MCMC chain of the non-linear parameters
                if not samples_mcmc == []:
                   n, num_param = np.shape(samples_mcmc)
                   plot = corner.corner(samples_mcmc, labels=param_mcmc, show_titles=True)
                   if tag is not None:
                       plot.savefig('{0}_para_corner.pdf'.format(tag))
                   if pltshow == 0:
                       plt.close()
                   else:
                       plt.show()
            if flux_ratio_plot==True and no_MCMC==False:
                param = Param(kwargs_model, kwargs_fixed_source=source_params[2], kwargs_fixed_ps=ps_param[2], **kwargs_constraints)
                mcmc_new_list = []
                labels_new = [r"Quasar flux", r"host_flux", r"source_x", r"source_y"]
                # transform the parameter position of the MCMC chain in a lenstronomy convention with keyword arguments #
                for i in range(len(samples_mcmc) // 10):
                    kwargs_lens_out, kwargs_light_source_out, kwargs_light_lens_out, kwargs_ps_out, kwargs_cosmo = param.getParams(samples_mcmc[i + len(samples_mcmc) // 10 * 9])
                    image_reconstructed, _, _, _ = imageLinearFit_list[k].image_linear_solve(kwargs_source=kwargs_light_source_out, kwargs_ps=kwargs_ps_out)
                    
                    image_ps = imageModel_list[k].point_source(kwargs_ps_out)
                    flux_quasar = np.sum(image_ps)
                    image_disk = imageModel_list[k].source_surface_brightness(kwargs_light_source_out,de_lensed=True,unconvolved=False, k=0)
                    flux_disk = np.sum(image_disk)
                    source_x = kwargs_ps_out[0]['ra_image']
                    source_y = kwargs_ps_out[0]['dec_image']
                    if flux_disk>0:
                        mcmc_new_list.append([flux_quasar, flux_disk, source_x, source_y])
                plot = corner.corner(mcmc_new_list, labels=labels_new, show_titles=True)
                if tag is not None:
                    plot.savefig('{0}_HOSTvsQSO_corner_band{1}.pdf'.format(tag,new_band_seq[k]))
                if pltshow == 0:
                    plt.close()
                else:
                    plt.show()
    errp_list = []
    for k in range(len(QSO_im_list)):
        if QSO_std_list is None:
            errp_list.append(np.sqrt(data_class_list[k].C_D+np.abs(error_map_list[k])))
        else:
            errp_list.append(np.sqrt(QSO_std_list[k]**2+np.abs(error_map_list[k])))
    return source_result_list, ps_result_list, image_ps_list, image_host_list, errp_list, shift_RADEC_list, fitting_seq     #fitting_seq.multi_band_list
def sl_sys_analysis():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        print(":Registered %d processes" % comm_size)
        args["infile"] = sys.argv[1]
        args["nimgs"] = sys.argv[2]
        args["los"] = sys.argv[3]
        args["version"] = sys.argv[4]
        args["dt_sigma"] = float(sys.argv[5])
        args["image_amps_sigma"] = float(sys.argv[6])
        args["flux_ratio_errors"] = float(sys.argv[7])
        args["astrometry_sigma"] = float(sys.argv[8])

    args = comm.bcast(args)
    # Organize division of strong lensing systems
    with open(args["infile"], "r") as myfile:
        limg_data = myfile.read()
    systems = json.loads(limg_data)
    sys_nr_per_proc = int(len(systems) / comm_size)
    print("comm_rank", comm_rank)
    start_sys = sys_nr_per_proc * comm_rank
    end_sys = sys_nr_per_proc * (comm_rank + 1)
    print(start_sys, end_sys)
    with open("../lens_catalogs_sie_only.json", "r") as myfile:
        limg_data = myfile.read()
    systems_prior = json.loads(limg_data)

    if comm_rank == 0:
        print("Each process will have %d systems" % sys_nr_per_proc)
        print("That should take app. %f min." % (sys_nr_per_proc * 20))

    source_size_pc = 10.0
    window_size = 0.1  # units of arcseconds
    grid_number = 100  # supersampled window (per axis)
    z_source = 2.0
    cosmo = FlatLambdaCDM(H0=71, Om0=0.3089, Ob0=0.0)

    results = {"gamma": [], "phi_ext": [], "gamma_ext": [], "theta_E": [], "D_dt": []}
    for ii in range(len(systems))[(start_sys + 2) : end_sys]:
        system = systems[ii]
        system_prior = systems_prior[ii]
        print("Analysing system ID: %d" % ii)

        # the data set is
        z_lens = system_prior["zl"]
        lensCosmo = LensCosmo(cosmo=cosmo, z_lens=z_lens, z_source=z_source)
        # convert units of pc into arcseconds
        D_s = lensCosmo.D_s
        source_size_arcsec = source_size_pc / 10 ** 6 / D_s / constants.arcsec
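        # D_s from LensCosmo is an angular diameter distance in Mpc, so the
        # factor 1e-6 converts pc -> Mpc, dividing by D_s gives radians, and
        # dividing by constants.arcsec converts radians -> arcsec.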
        print("The source size in arcsec init = %.4f" % source_size_arcsec) #0.0012

        # multiple images properties
        ximg = np.zeros(system["nimgs"])
        yimg = np.zeros(system["nimgs"])
        t_days = np.zeros(system["nimgs"])
        image_amps = np.zeros(system["nimgs"])
        for jj in range(system["nimgs"]):
            ximg[jj] = system["ximg"][jj]  # [arcsec]
            yimg[jj] = system["yimg"][jj]  # [arcsec]
            t_days[jj] = system["delay"][jj]  # [days]
            image_amps[jj] = system["mags"][jj]  # [linear units or magnitudes]
        # sort by arrival time
        index_sort = np.argsort(t_days)
        ximg = ximg[index_sort]  # relative RA (arc seconds)
        yimg = yimg[index_sort]  # relative DEC (arc seconds)
        image_amps = np.abs(image_amps[index_sort])
        t_days = t_days[index_sort]
        d_dt = t_days[1:] - t_days[0]

        # measurement uncertainties
        astrometry_sigma = args["astrometry_sigma"]
        ximg_measured = ximg + np.random.normal(0, astrometry_sigma, system["nimgs"])
        yimg_measured = yimg + np.random.normal(0, astrometry_sigma, system["nimgs"])
        image_amps_sigma = np.ones(system["nimgs"]) * args["image_amps_sigma"]
        flux_ratios = image_amps[1:] - image_amps[0]
        flux_ratio_errors = np.ones(system["nimgs"] - 1) * args["flux_ratio_errors"]
        flux_ratios_measured = flux_ratios + np.random.normal(0, flux_ratio_errors)
        d_dt_sigma = np.ones(system["nimgs"] - 1) * args["dt_sigma"]
        d_dt_measured = d_dt + np.random.normal(0, d_dt_sigma)

        kwargs_data_joint = {
            "time_delays_measured": d_dt_measured,
            "time_delays_uncertainties": d_dt_sigma,
            "flux_ratios": flux_ratios_measured,
            "flux_ratio_errors": flux_ratio_errors,
            "ra_image_list": [ximg_measured],
            "dec_image_list": [yimg_measured],
        }

        # lens model choices
        lens_model_list = ["SPEMD", "SHEAR_GAMMA_PSI"]

        # 1. layer: primary SPEP
        fixed_lens = []
        kwargs_lens_init = []
        kwargs_lens_sigma = []
        kwargs_lower_lens = []
        kwargs_upper_lens = []
        fixed_lens.append({})
        kwargs_lens_init.append(
            {
                "theta_E": 1.0,
                "gamma": 2,
                "center_x": 0,
                "center_y": 0,
                "e1": 0,
                "e2": 0.0,
            }
        )
        # error
        kwargs_lens_sigma.append(
            {
                "theta_E": 0.2,
                "e1": 0.1,
                "e2": 0.1,
                "gamma": 0.1,
                "center_x": 0.1,
                "center_y": 0.1,
            }
        )
        # lower limit
        kwargs_lower_lens.append(
            {
                "theta_E": 0.01,
                "e1": -0.5,
                "e2": -0.5,
                "gamma": 1.5,
                "center_x": -10,
                "center_y": -10,
            }
        )
        # upper limit
        kwargs_upper_lens.append(
            {
                "theta_E": 10,
                "e1": 0.5,
                "e2": 0.5,
                "gamma": 2.5,
                "center_x": 10,
                "center_y": 10,
            }
        )
        # 2nd layer: external SHEAR
        fixed_lens.append({"ra_0": 0, "dec_0": 0})
        kwargs_lens_init.append({"gamma_ext": 0.05, "psi_ext": 0.0})
        kwargs_lens_sigma.append({"gamma_ext": 0.05, "psi_ext": np.pi})
        kwargs_lower_lens.append({"gamma_ext": 0, "psi_ext": -np.pi})
        kwargs_upper_lens.append({"gamma_ext": 0.3, "psi_ext": np.pi})
        
        # 3rd layer: external CONVERGENCE (mass-sheet)
        fixed_lens.append({'ra_0': 0, 'dec_0': 0})
        kwargs_lens_init.append({'kappa_ext': 0.12})
        kwargs_lens_sigma.append({'kappa_ext': 0.06})
        kwargs_lower_lens.append({'kappa_ext': 0.0})
        kwargs_upper_lens.append({'kappa_ext': 0.3})
        
        # combined lens model
        lens_params = [
            kwargs_lens_init,
            kwargs_lens_sigma,
            fixed_lens,
            kwargs_lower_lens,
            kwargs_upper_lens,
        ]

        # image position parameters
        point_source_list = ["LENSED_POSITION"]
        # no point-source parameters are held fixed here; the image positions are sampled
        fixed_ps = [{}]
        # the initial guess for the image positions is the measured image positions
        kwargs_ps_init = [{"ra_image": ximg, "dec_image": yimg}]
        # allow some freedom in how well the sampled image positions match
        # those given by the data ('ra_image', 'dec_image')
        kwargs_ps_sigma = [
            {
                "ra_image": 0.01 * np.ones(len(ximg)),
                "dec_image": 0.01 * np.ones(len(ximg)),
            }
        ]
        kwargs_lower_ps = [
            {
                "ra_image": -10 * np.ones(len(ximg)),
                "dec_image": -10 * np.ones(len(ximg)),
            }
        ]
        kwargs_upper_ps = [
            {"ra_image": 10 * np.ones(len(ximg)), "dec_image": 10 * np.ones(len(ximg))}
        ]

        ps_params = [
            kwargs_ps_init,
            kwargs_ps_sigma,
            fixed_ps,
            kwargs_lower_ps,
            kwargs_upper_ps,
        ]

        # quasar source size
        fixed_special = {}
        kwargs_special_init = {}
        kwargs_special_sigma = {}
        kwargs_lower_special = {}
        kwargs_upper_special = {}

        fixed_special["source_size"] = source_size_arcsec
        kwargs_special_init["source_size"] = source_size_arcsec
        kwargs_special_sigma["source_size"] = source_size_arcsec
        kwargs_lower_special["source_size"] = 0.0001
        kwargs_upper_special["source_size"] = 1

        # Time-delay distance
        kwargs_special_init["D_dt"] = 4300   # corresponds to H0 ~ 70
        kwargs_special_sigma["D_dt"] = 3000
        kwargs_lower_special["D_dt"] = 2500  # corresponds to H0 ~ 120
        kwargs_upper_special["D_dt"] = 14000 # corresponds to H0 ~ 20

        special_params = [
            kwargs_special_init,
            kwargs_special_sigma,
            fixed_special,
            kwargs_lower_special,
            kwargs_upper_special,
        ]

        # combined parameter settings
        kwargs_params = {
            "lens_model": lens_params,
            "point_source_model": ps_params,
            "special": special_params,
        }

        # our model choices
        kwargs_model = {
            "lens_model_list": lens_model_list,
            "point_source_model_list": point_source_list,
        }
        lensModel = LensModel(kwargs_model["lens_model_list"])
        lensModelExtensions = LensModelExtensions(lensModel=lensModel)
        lensEquationSolver = LensEquationSolver(lensModel=lensModel)

        # setup options for likelihood and parameter sampling
        time_delay_likelihood = True
        flux_ratio_likelihood = True
        image_position_likelihood = True
        kwargs_flux_compute = {
            "source_type": "INF",
            "window_size": window_size,
            "grid_number": grid_number,
        }

        kwargs_constraints = {
            "num_point_source_list": [int(args["nimgs"])],
            # any proposed lens model must satisfy the image positions
            # appearing at the position of the point sources being sampled
            # "solver_type": "PROFILE_SHEAR",
            "Ddt_sampling": time_delay_likelihood,
            # sampling of the time-delay distance
            # explicit modelling of the astrometric imperfection of
            # the point source positions
            "point_source_offset": True,
        }

        # explicit sampling of finite source size parameter
        # (only use when source_type='GAUSSIAN' or 'TORUS')
        if (
            kwargs_flux_compute["source_type"] in ["GAUSSIAN", "TORUS"]
            and flux_ratio_likelihood is True
        ):
            kwargs_constraints["source_size"] = True

        # e.g. power-law mass slope of the main deflector
        # [[index_model, 'param_name', mean, 1-sigma error], [...], ...]
        prior_lens = [[0, "gamma", 2, 0.1]]
        prior_special = []

        kwargs_likelihood = {
            "position_uncertainty": args["astrometry_sigma"],
            "source_position_likelihood": True,
            "image_position_likelihood": True,
            "time_delay_likelihood": True,
            "flux_ratio_likelihood": True,
            "kwargs_flux_compute": kwargs_flux_compute,
            "prior_lens": prior_lens,
            "prior_special": prior_special,
            "check_solver": True,
            "solver_tolerance": 0.001,
            "check_bounds": True,
        }

        fitting_seq = FittingSequence(
            kwargs_data_joint,
            kwargs_model,
            kwargs_constraints,
            kwargs_likelihood,
            kwargs_params,
        )
        fitting_kwargs_list = [
            ["PSO", {"sigma_scale": 1.0, "n_particles": 200, "n_iterations": 500}]
        ]

        chain_list_pso = fitting_seq.fit_sequence(fitting_kwargs_list)
        kwargs_result = fitting_seq.best_fit()
        kwargs_result = fitting_seq.best_fit(bijective=True)
        args_result = fitting_seq.param_class.kwargs2args(**kwargs_result)
        logL, _ = fitting_seq.likelihoodModule.logL(args_result, verbose=True)

        # and now we run the MCMC
        fitting_kwargs_list = [
            [
                "MCMC",
                {"n_burn": 400, "n_run": 600, "walkerRatio": 10, "sigma_scale": 0.1},
            ]
        ]
        chain_list_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)
        kwargs_result = fitting_seq.best_fit()
        # the MCMC entry is the last element of the returned chain list
        sampler_type, samples_mcmc, param_mcmc, dist_mcmc = chain_list_mcmc[-1]
        print("number of non-linear parameters in the MCMC process: ", len(param_mcmc))
        print("parameters in order: ", param_mcmc)
        print("number of evaluations in the MCMC process: ", np.shape(samples_mcmc)[0])

        param = Param(
            kwargs_model,
            fixed_lens,
            kwargs_fixed_ps=fixed_ps,
            kwargs_fixed_special=fixed_special,
            kwargs_lens_init=kwargs_result["kwargs_lens"],
            **kwargs_constraints,
        )
        # the number of non-linear parameters and their names #
        num_param, param_list = param.num_param()

        # collect the per-sample values so that medians over the chain can be taken below
        gamma_arr, phi_ext_arr, gamma_ext_arr, theta_E_arr, D_dt_arr = [], [], [], [], []
        for i in range(len(samples_mcmc)):
            kwargs_out = param.args2kwargs(samples_mcmc[i])
            kwargs_lens_out, kwargs_special_out, kwargs_ps_out = (
                kwargs_out["kwargs_lens"],
                kwargs_out["kwargs_special"],
                kwargs_out["kwargs_ps"],
            )

            # compute 'real' image position adding potential astrometric shifts
            x_pos = kwargs_ps_out[0]["ra_image"]
            y_pos = kwargs_ps_out[0]["dec_image"]

            # extract quantities of the main deflector
            theta_E_arr.append(kwargs_lens_out[0]["theta_E"])
            gamma_arr.append(kwargs_lens_out[0]["gamma"])
            e1, e2 = kwargs_lens_out[0]["e1"], kwargs_lens_out[0]["e2"]
            phi, q = param_util.ellipticity2phi_q(e1, e2)
            phi_ext_arr.append(kwargs_lens_out[1]["psi_ext"] % np.pi)
            gamma_ext_arr.append(kwargs_lens_out[1]["gamma_ext"])
            if flux_ratio_likelihood is True:
                mag = lensModel.magnification(x_pos, y_pos, kwargs_lens_out)
                flux_ratio_fit = mag[1:] / mag[0]
            if (
                kwargs_constraints.get("source_size", False) is True
                and "source_size" not in fixed_special
            ):
                source_size = kwargs_special_out["source_size"]
            if time_delay_likelihood is True:
                D_dt_arr.append(kwargs_special_out["D_dt"])

        # posterior medians of the sampled quantities (note that a default
        # cosmology was assumed above -- attention for experimenter bias!)
        gamma = np.median(gamma_arr)
        phi_ext = np.median(phi_ext_arr)
        gamma_ext = np.median(gamma_ext_arr)
        theta_E = np.median(theta_E_arr)
        D_dt = np.median(D_dt_arr)
        results["gamma"].append(gamma)
        results["phi_ext"].append(phi_ext)
        results["gamma_ext"].append(gamma_ext)
        results["theta_E"].append(theta_E)
        results["D_dt"].append(D_dt)
Esempio n. 22
0
def sl_sys_analysis():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        print(":Registered %d processes" % comm_size)
        args["infile"] = sys.argv[1]
        args["nimgs"] = sys.argv[2]
        args["los"] = sys.argv[3]
        args["version"] = sys.argv[4]
        args["dt_sigma"] = float(sys.argv[5])
        args["image_amps_sigma"] = float(sys.argv[6])
        args["flux_ratio_errors"] = float(sys.argv[7])
        args["astrometry_sigma"] = float(sys.argv[8])

    args = comm.bcast(args)
    # Organize division of strong lensing systems
    with open(args["infile"], "r") as myfile:
        limg_data = myfile.read()
    systems = json.loads(limg_data)
    sys_nr_per_proc = int(len(systems) / comm_size)
    print('comm_rank', comm_rank)
    start_sys = sys_nr_per_proc * comm_rank
    end_sys = sys_nr_per_proc * (comm_rank + 1)
    print(start_sys, end_sys)
    with open("../lens_catalogs_sie_only.json", "r") as myfile:
        limg_data = myfile.read()
    systems_prior = json.loads(limg_data)

    if comm_rank == 0:
        print("Each process will have %d systems" % sys_nr_per_proc)
        print("That should take app. %f min." % (sys_nr_per_proc * 20))

    results = {
        "gamma": [],
        "phi_ext": [],
        "gamma_ext": [],
        "theta_E": [],
        "D_dt": [],
    }
    for ii in range(len(systems))[(start_sys + 2):end_sys]:
        system = systems[ii]
        system_prior = systems_prior[ii]
        print("Analysing system ID: %d" % ii)
        print(system)
        # the data set is
        z_lens = system_prior["zl"]
        z_source = 2.0

        # multiple images properties
        ximg = np.zeros(system["nimgs"])
        yimg = np.zeros(system["nimgs"])
        delay = np.zeros(system["nimgs"])
        image_amps = np.zeros(system["nimgs"])
        for jj in range(system["nimgs"]):
            ximg[jj] = system["ximg"][jj]  #[arcsec]
            yimg[jj] = system["yimg"][jj]  #[arcsec]
            delay[jj] = system["delay"][jj]  #[days]
            image_amps[jj] = system["mags"][jj]  #[linear units or magnitudes]
        # sort by arrival time
        index_sort = np.argsort(delay)
        ximg = ximg[index_sort]
        yimg = yimg[index_sort]
        delay = delay[index_sort]
        image_amps = image_amps[index_sort]
        d_dt = delay[1:] - delay[0]
        # measurement uncertainties
        d_dt_sigma = np.ones(system["nimgs"] - 1) * args["dt_sigma"]
        image_amps_sigma = np.ones(system["nimgs"]) * args["image_amps_sigma"]
        flux_ratios = image_amps[1:] - image_amps[0]
        flux_ratio_errors = np.ones(system["nimgs"] -
                                    1) * args["flux_ratio_errors"]

        # lens model choices
        lens_model_list = ["SPEP", "SHEAR"]
        # first choice: SPEP
        fixed_lens = []
        kwargs_lens_init = []
        kwargs_lens_sigma = []
        kwargs_lower_lens = []
        kwargs_upper_lens = []
        fixed_lens.append({})
        kwargs_lens_init.append({
            "theta_E": 1.0,
            "gamma": 2,
            "center_x": 0,
            "center_y": 0,
            "e1": 0,
            "e2": 0.0,
        })
        # error
        kwargs_lens_sigma.append({
            "theta_E": 0.2,
            "e1": 0.1,
            "e2": 0.1,
            "gamma": 0.1,
            "center_x": 0.1,
            "center_y": 0.1,
        })
        # lower limit
        kwargs_lower_lens.append({
            "theta_E": 0.01,
            "e1": -0.5,
            "e2": -0.5,
            "gamma": 1.5,
            "center_x": -10,
            "center_y": -10,
        })
        # upper limit
        kwargs_upper_lens.append({
            "theta_E": 10,
            "e1": 0.5,
            "e2": 0.5,
            "gamma": 2.5,
            "center_x": 10,
            "center_y": 10,
        })
        # second choice: SHEAR
        fixed_lens.append({"ra_0": 0, "dec_0": 0})
        kwargs_lens_init.append({"e1": 0.0, "e2": 0.0})
        kwargs_lens_sigma.append({"e1": 0.1, "e2": 0.1})
        kwargs_lower_lens.append({"e1": -0.2, "e2": -0.2})
        kwargs_upper_lens.append({"e1": 0.2, "e2": 0.2})
        lens_params = [
            kwargs_lens_init,
            kwargs_lens_sigma,
            fixed_lens,
            kwargs_lower_lens,
            kwargs_upper_lens,
        ]

        point_source_list = ["LENSED_POSITION"]
        fixed_ps = [{"ra_image": ximg, "dec_image": yimg}]
        kwargs_ps_init = fixed_ps
        # let some freedom in how well the actual image positions are
        # matching those given by the data (indicated as 'ra_image', 'dec_image'
        # and held fixed while fitting)
        kwargs_ps_sigma = [{
            "ra_image": 0.01 * np.ones(len(ximg)),
            "dec_image": 0.01 * np.ones(len(ximg)),
        }]
        kwargs_lower_ps = [{
            "ra_image": -10 * np.ones(len(ximg)),
            "dec_image": -10 * np.ones(len(ximg)),
        }]
        kwargs_upper_ps = [{
            "ra_image": 10 * np.ones(len(ximg)),
            "dec_image": 10 * np.ones(len(ximg))
        }]

        ps_params = [
            kwargs_ps_init,
            kwargs_ps_sigma,
            fixed_ps,
            kwargs_lower_ps,
            kwargs_upper_ps,
        ]

        fixed_cosmo = {}
        kwargs_cosmo_init = {
            "D_dt": 5000,
            "delta_x_image": np.zeros_like(ximg),
            "delta_y_image": np.zeros_like(ximg),
        }
        kwargs_cosmo_sigma = {
            "D_dt": 10000,
            "delta_x_image": np.ones_like(ximg) * args["astrometry_sigma"],
            "delta_y_image": np.ones_like(ximg) * args["astrometry_sigma"],
        }
        kwargs_lower_cosmo = {
            "D_dt": 0,
            "delta_x_image": np.ones_like(ximg) * (-1),
            "delta_y_image": np.ones_like(ximg) * (-1),
        }
        kwargs_upper_cosmo = {
            "D_dt": 10000,
            "delta_x_image": np.ones_like(ximg) * (1),
            "delta_y_image": np.ones_like(ximg) * (1),
        }
        cosmo_params = [
            kwargs_cosmo_init,
            kwargs_cosmo_sigma,
            fixed_cosmo,
            kwargs_lower_cosmo,
            kwargs_upper_cosmo,
        ]

        kwargs_params = {
            "lens_model": lens_params,
            "point_source_model": ps_params,
            "cosmography": cosmo_params,
        }

        # setup options for likelihood and parameter sampling
        kwargs_constraints = {
            "num_point_source_list": [int(args["nimgs"])],
            # any proposed lens model must satisfy the image positions
            # appearing at the position of the point sources being sampled
            "solver_type": "PROFILE_SHEAR",
            "cosmo_type": "D_dt",
            # sampling of the time-delay distance
            # explicit modelling of the astrometric imperfection of
            # the point source positions
            "point_source_offset": True,
        }
        kwargs_likelihood = {
            "check_bounds": True,
            "point_source_likelihood": True,
            "position_uncertainty": args["astrometry_sigma"],
            "check_solver": True,
            "solver_tolerance": 0.001,
            "time_delay_likelihood": True,
            "image_likelihood": False,
            # this needs to be explicitly given when not having imaging data
            "flux_ratio_likelihood": True,  # enables the flux ratio likelihood
        }
        kwargs_data_joint = {
            "time_delays_measured": d_dt,
            "time_delays_uncertainties": d_dt_sigma,
            "flux_ratios": flux_ratios,
            "flux_ratio_errors": flux_ratio_errors,
        }
        kwargs_model = {
            "lens_model_list": lens_model_list,
            "point_source_model_list": point_source_list,
        }

        mpi = False  # MPI possible, but not supported through that notebook.
        fitting_seq = FittingSequence(
            kwargs_data_joint,
            kwargs_model,
            kwargs_constraints,
            kwargs_likelihood,
            kwargs_params,
        )
        fitting_kwargs_list = [
            # ['update_settings', {'lens_add_fixed': [[0, ['gamma']]]}],
            [
                "PSO", {
                    "sigma_scale": 1.0,
                    "n_particles": 100,
                    "n_iterations": 100
                }
            ]
        ]

        chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(
            fitting_kwargs_list)
        lens_result, source_result, lens_light_result, ps_result, cosmo_result = (
            fitting_seq.best_fit())

        # and now we run the MCMC
        fitting_kwargs_list = [
            [
                "PSO", {
                    "sigma_scale": 0.1,
                    "n_particles": 100,
                    "n_iterations": 100
                }
            ],
            [
                "MCMC",
                {
                    "n_burn": 200,
                    "n_run": 200,
                    "walkerRatio": 10,
                    "sigma_scale": 0.1
                },
            ],
        ]
        chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(
            fitting_kwargs_list)
        lens_result, source_result, lens_light_result, ps_result, cosmo_result = (
            fitting_seq.best_fit())
        # print("number of non-linear parameters in the MCMC process: ", len(param_mcmc))
        # print("parameters in order: ", param_mcmc)
        print("number of evaluations in the MCMC process: ",
              np.shape(samples_mcmc)[0])

        param = Param(
            kwargs_model,
            fixed_lens,
            kwargs_fixed_ps=fixed_ps,
            kwargs_fixed_cosmo=fixed_cosmo,
            kwargs_lens_init=lens_result,
            **kwargs_constraints,
        )
        # the number of non-linear parameters and their names
        num_param, param_list = param.num_param()

        lensAnalysis = LensAnalysis(kwargs_model)

        mcmc_new_list = []
        labels_new = [
            r"$\gamma$",
            r"$\phi_{ext}$",
            r"$\gamma_{ext}$",
            r"$D_{\Delta t}$",
        ]

        D_dt = np.zeros(len(samples_mcmc))
        theta_E = np.zeros(len(samples_mcmc))
        gamma = np.zeros(len(samples_mcmc))
        e1 = np.zeros(len(samples_mcmc))
        e2 = np.zeros(len(samples_mcmc))
        phi_ext = np.zeros(len(samples_mcmc))
        gamma_ext = np.zeros(len(samples_mcmc))
        for i in range(len(samples_mcmc)):
            # transform the parameter position of the MCMC chain in a
            # lenstronomy convention with keyword arguments
            kwargs_lens_out, kwargs_light_source_out, kwargs_light_lens_out, kwargs_ps_out, kwargs_cosmo = param.args2kwargs(
                samples_mcmc[i])
            D_dt[i] = kwargs_cosmo["D_dt"]
            gamma[i] = kwargs_lens_out[0]["gamma"]
            theta_E[i] = kwargs_lens_out[0]['theta_E']
            e1[i] = kwargs_lens_out[0]['e1']
            e2[i] = kwargs_lens_out[0]['e2']
            phi_ext[i], gamma_ext[i] = lensAnalysis._lensModelExtensions.external_shear(
                kwargs_lens_out)

        # plot = corner.corner(mcmc_new_list, labels=labels_new, show_titles=True)
        # and here the time-delay distance predicted by a default
        # cosmology (beware of experimenter bias!)
        cosmo = FlatLambdaCDM(
            H0=71,
            Om0=0.3089,
            Ob0=0.,
        )
        lensCosmo = LensCosmo(
            cosmo=cosmo,
            z_lens=z_lens,
            z_source=z_source,
        )
        gamma = np.mean(gamma)
        phi_ext = np.mean(phi_ext)
        gamma_ext = np.mean(gamma_ext)
        theta_E = np.mean(theta_E)
        D_dt = np.mean(D_dt)
        results["gamma"].append(gamma)
        results["phi_ext"].append(phi_ext)
        results["gamma_ext"].append(gamma_ext)
        results["theta_E"].append(theta_E)
        results["D_dt"].append(lensCosmo.D_dt)

    with open(
            "./quasars_%s_nimgs_%s_%s.json" %
        (args["los"], args["nimgs"], args["version"]), 'w') as fout:
        json.dump(results, fout)
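
A minimal sketch of how the JSON written above could be read back and summarized; the file name below is hypothetical (it just mirrors the dump pattern), and the exact keys of `results` depend on how that dict was initialized earlier in the script:

import json

import numpy as np

# hypothetical placeholders for los, nimgs and version in the file name
with open("./quasars_wlos_nimgs_4_v1.json", "r") as fin:
    results = json.load(fin)
for key, values in results.items():
    values = np.asarray(values, dtype=float)
    print("%s: mean %.3f, std %.3f over %d systems"
          % (key, values.mean(), values.std(), len(values)))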
Example 23
    def test_fitting_sequence(self):
        # kwargs_init = [self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps]
        lens_sigma = [{
            'theta_E': 0.1,
            'gamma': 0.1,
            'e1': 0.1,
            'e2': 0.1,
            'center_x': 0.1,
            'center_y': 0.1
        }, {
            'e1': 0.1,
            'e2': 0.1
        }]
        lens_lower = [{
            'theta_E': 0.,
            'gamma': 1.5,
            'center_x': -2,
            'center_y': -2,
            'e1': -0.4,
            'e2': -0.4
        }, {
            'e1': -0.3,
            'e2': -0.3
        }]
        lens_upper = [{
            'theta_E': 10.,
            'gamma': 2.5,
            'center_x': 2,
            'center_y': 2,
            'e1': 0.4,
            'e2': 0.4
        }, {
            'e1': 0.3,
            'e2': 0.3
        }]
        source_sigma = [{
            'R_sersic': 0.05,
            'n_sersic': 0.5,
            'center_x': 0.1,
            'center_y': 0.1,
            'e1': 0.1,
            'e2': 0.1
        }]
        source_lower = [{
            'R_sersic': 0.01,
            'n_sersic': 0.5,
            'center_x': -2,
            'center_y': -2,
            'e1': -0.4,
            'e2': -0.4
        }]
        source_upper = [{
            'R_sersic': 10,
            'n_sersic': 5.5,
            'center_x': 2,
            'center_y': 2,
            'e1': 0.4,
            'e2': 0.4
        }]

        lens_light_sigma = [{
            'R_sersic': 0.05,
            'n_sersic': 0.5,
            'center_x': 0.1,
            'center_y': 0.1
        }]
        lens_light_lower = [{
            'R_sersic': 0.01,
            'n_sersic': 0.5,
            'center_x': -2,
            'center_y': -2
        }]
        lens_light_upper = [{
            'R_sersic': 10,
            'n_sersic': 5.5,
            'center_x': 2,
            'center_y': 2
        }]
        ps_sigma = [{'ra_source': 1, 'dec_source': 1, 'point_amp': 1}]

        lens_param = (self.kwargs_lens, lens_sigma,
                      [{}, {'ra_0': 0, 'dec_0': 0}], lens_lower, lens_upper)
        source_param = (self.kwargs_source, source_sigma, [{}], source_lower,
                        source_upper)
        lens_light_param = (self.kwargs_lens_light, lens_light_sigma,
                            [{'center_x': 0}], lens_light_lower,
                            lens_light_upper)
        ps_param = (self.kwargs_ps, ps_sigma, [{}], self.kwargs_ps,
                    self.kwargs_ps)

        kwargs_params = {
            'lens_model': lens_param,
            'source_model': source_param,
            'lens_light_model': lens_light_param,
            'point_source_model': ps_param,
            # 'cosmography': cosmo_param
        }
        # kwargs_params = [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_init, kwargs_init]
        image_band = [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]
        multi_band_list = [image_band]
        kwargs_data_joint = {
            'multi_band_list': multi_band_list,
            'multi_band_type': 'multi-linear'
        }
        fittingSequence = FittingSequence(kwargs_data_joint, self.kwargs_model,
                                          self.kwargs_constraints,
                                          self.kwargs_likelihood,
                                          kwargs_params)

        kwargs_result = fittingSequence.best_fit(bijective=False)
        lens_temp = kwargs_result['kwargs_lens']
        npt.assert_almost_equal(lens_temp[0]['theta_E'],
                                self.kwargs_lens[0]['theta_E'],
                                decimal=2)

        logL = fittingSequence.best_fit_likelihood
        print(logL, 'test')
        #print(lens_temp, source_temp, lens_light_temp, ps_temp, cosmo_temp)
        assert logL < 0
        bic = fittingSequence.bic
        assert bic > 0
        #npt.assert_almost_equal(bic, 20000000220.29376, decimal=-4)

        #npt.assert_almost_equal(logL, -10000000061.792593, decimal=-4)

        n_p = 2
        n_i = 2
        fitting_list = []

        kwargs_pso = {
            'sigma_scale': 1,
            'n_particles': n_p,
            'n_iterations': n_i
        }
        fitting_list.append(['PSO', kwargs_pso])
        kwargs_mcmc = {
            'sigma_scale': 0.1,
            'n_burn': 1,
            'n_run': 1,
            'walkerRatio': 2
        }
        fitting_list.append(['MCMC', kwargs_mcmc])
        kwargs_mcmc['re_use_samples'] = True
        fitting_list.append(['MCMC', kwargs_mcmc])
        kwargs_mcmc['sampler_type'] = 'EMCEE'
        fitting_list.append(['MCMC', kwargs_mcmc])
        kwargs_align = {
            'lowerLimit': -0.1,
            'upperLimit': 0.1,
            'n_particles': 2,
            'n_iterations': 2
        }
        fitting_list.append(['align_images', kwargs_align])
        kwargs_psf_iter = {
            'num_iter': 2,
            'psf_iter_factor': 0.5,
            'stacking_method': 'mean'
        }
        fitting_list.append(['psf_iteration', kwargs_psf_iter])
        fitting_list.append(['restart', None])
        fitting_list.append(['fix_not_computed', {'free_bands': [True]}])
        n_sersic_overwrite = 4
        kwargs_update = {
            'lens_light_add_fixed': [[0, ['n_sersic'], [n_sersic_overwrite]]],
            'lens_light_remove_fixed': [[0, ['center_x']]],
            'change_source_lower_limit': [[0, ['n_sersic'], [0.1]]],
            'change_source_upper_limit': [[0, ['n_sersic'], [10]]]
        }
        fitting_list.append(['update_settings', kwargs_update])

        #kwargs_model = {}, kwargs_constraints = {}, kwargs_likelihood = {}, lens_add_fixed = [],
        #source_add_fixed = [], lens_light_add_fixed = [], ps_add_fixed = [], cosmo_add_fixed = [], lens_remove_fixed = [],
        #source_remove_fixed = [], lens_light_remove_fixed = [], ps_remove_fixed = [], cosmo_remove_fixed = []

        chain_list = fittingSequence.fit_sequence(fitting_list)
        lens_fixed, source_fixed, lens_light_fixed, ps_fixed, special_fixed, extinction_fixed = fittingSequence._updateManager._fixed_kwargs
        kwargs_result = fittingSequence.best_fit(bijective=False)
        npt.assert_almost_equal(kwargs_result['kwargs_lens'][0]['theta_E'],
                                self.kwargs_lens[0]['theta_E'],
                                decimal=1)
        npt.assert_almost_equal(
            fittingSequence._updateManager._lens_light_fixed[0]['n_sersic'],
            n_sersic_overwrite,
            decimal=8)
        npt.assert_almost_equal(lens_light_fixed[0]['n_sersic'], 4, decimal=-1)
        assert fittingSequence._updateManager._lower_kwargs[1][0][
            'n_sersic'] == 0.1
        assert fittingSequence._updateManager._upper_kwargs[1][0][
            'n_sersic'] == 10

        # Nested sampler tests
        # further decrease the parameter space for nested samplers to run faster
        fitting_list2 = []
        kwargs_update2 = {
            'ps_add_fixed': [[0, ['ra_source', 'dec_source'], [0, 0]]],
            'lens_light_add_fixed': [[
                0, ['n_sersic', 'R_sersic', 'center_x', 'center_y'],
                [4, .1, 0, 0]
            ]],
            'source_add_fixed': [[
                0, ['R_sersic', 'e1', 'e2', 'center_x', 'center_y'],
                [.6, .1, .1, 0, 0]
            ]],
            'lens_add_fixed': [[
                0, ['gamma', 'theta_E', 'e1', 'e2', 'center_x', 'center_y'],
                [1.8, 1., .1, .1, 0, 0]
            ], [1, ['e1', 'e2'], [0.01, 0.01]]],
            'change_source_lower_limit': [[0, ['n_sersic'], [2.9]]],
            'change_source_upper_limit': [[0, ['n_sersic'], [3.1]]]
        }
        fitting_list2.append(['update_settings', kwargs_update2])
        kwargs_multinest = {
            'sampler_type': 'MULTINEST',
            'kwargs_run': {
                'n_live_points': 10,
                'evidence_tolerance': 0.5,
                'sampling_efficiency':
                0.8,  # 1 for posterior-only, 0 for evidence-only
                'importance_nested_sampling': False,
                'multimodal': True,
                'const_efficiency_mode':
                False,  # reduce sampling_efficiency to 5% when True
            },
            'remove_output_dir': True,
        }
        fitting_list2.append(['nested_sampling', kwargs_multinest])
        kwargs_dynesty = {
            'sampler_type': 'DYNESTY',
            'kwargs_run': {
                'dlogz_init': 0.01,
                'nlive_init': 3,
                'nlive_batch': 3,
                'maxbatch': 1,
            },
        }
        fitting_list2.append(['nested_sampling', kwargs_dynesty])
        kwargs_dypolychord = {
            'sampler_type': 'DYPOLYCHORD',
            'kwargs_run': {
                'ninit': 8,
                'nlive_const': 10,
                #'seed_increment': 1,
                'resume_dyn_run': False,
                #'init_step': 10,
            },
            'polychord_settings': {
                'seed': 1,
                #'num_repeats': 20
            },
            'dypolychord_dynamic_goal':
            0.8,  # 1 for posterior-only, 0 for evidence-only
            'remove_output_dir': True,
        }
        fitting_list2.append(['nested_sampling', kwargs_dypolychord])

        chain_list2 = fittingSequence.fit_sequence(fitting_list2)
        kwargs_fixed = fittingSequence._updateManager._fixed_kwargs
        npt.assert_almost_equal(kwargs_fixed[0][1]['e1'], 0.01, decimal=2)
        assert fittingSequence._updateManager._lower_kwargs[1][0][
            'n_sersic'] == 2.9
        assert fittingSequence._updateManager._upper_kwargs[1][0][
            'n_sersic'] == 3.1

        kwargs_test = {'kwargs_lens': 1}
        fittingSequence.update_state(kwargs_test)
        kwargs_out = fittingSequence.best_fit(bijective=True)
        assert kwargs_out['kwargs_lens'] == 1

    def test_minimizer(self):
        n_p = 2
        n_i = 2

        fitting_list = []
        kwargs_simplex = {'n_iterations': n_i, 'method': 'Nelder-Mead'}
        fitting_list.append(['SIMPLEX', kwargs_simplex])
        kwargs_simplex = {'n_iterations': n_i, 'method': 'Powell'}
        fitting_list.append(['SIMPLEX', kwargs_simplex])
        kwargs_pso = {
            'sigma_scale': 1,
            'n_particles': n_p,
            'n_iterations': n_i
        }
        fitting_list.append(['PSO', kwargs_pso])
        kwargs_mcmc = {
            'sigma_scale': 1,
            'n_burn': 1,
            'n_run': 1,
            'n_walkers': 10,
            'sampler_type': 'EMCEE'
        }
        fitting_list.append(['MCMC', kwargs_mcmc])
        kwargs_mcmc['re_use_samples'] = True
        kwargs_mcmc['init_samples'] = np.array([[np.random.normal(1, 0.001)]
                                                for i in range(100)])
        fitting_list.append(['MCMC', kwargs_mcmc])

        def custom_likelihood(kwargs_lens,
                              kwargs_source=None,
                              kwargs_lens_light=None,
                              kwargs_ps=None,
                              kwargs_special=None,
                              kwargs_extinction=None):
            theta_E = kwargs_lens[0]['theta_E']
            return -(theta_E - 1.)**2 / 0.1**2 / 2
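
        # Note: the custom term above is, up to a constant, a Gaussian
        # log-likelihood in theta_E with mean 1.0 and sigma 0.1; lenstronomy
        # adds 'custom_logL_addition' to the total log-likelihood, so it acts
        # as a prior pulling theta_E towards 1.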

        kwargs_likelihood = {'custom_logL_addition': custom_likelihood}

        kwargs_data_joint = {'multi_band_list': []}
        kwargs_model = {'lens_model_list': ['SIS']}
        kwargs_constraints = {}
        lens_param = ([{'theta_E': 1, 'center_x': 0, 'center_y': 0}],
                      [{'theta_E': 0.1, 'center_x': 0.1, 'center_y': 0.1}],
                      [{'center_x': 0, 'center_y': 0}],
                      [{'theta_E': 0, 'center_x': -10, 'center_y': -10}],
                      [{'theta_E': 10, 'center_x': 10, 'center_y': 10}])

        kwargs_params = {'lens_model': lens_param}
        fittingSequence = FittingSequence(kwargs_data_joint, kwargs_model,
                                          kwargs_constraints,
                                          kwargs_likelihood, kwargs_params)
        args = fittingSequence.param_class.kwargs2args(
            kwargs_lens=[{
                'theta_E': 1,
                'center_x': 0,
                'center_y': 0
            }])
        kwargs_result = fittingSequence.param_class.args2kwargs(args)
        print(kwargs_result)
        print(args, 'test args')
        chain_list = fittingSequence.fit_sequence(fitting_list)
        kwargs_result = fittingSequence.best_fit(bijective=False)
        npt.assert_almost_equal(kwargs_result['kwargs_lens'][0]['theta_E'],
                                1,
                                decimal=2)
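
For reference, each entry of `kwargs_params` passed to FittingSequence in the tests above follows lenstronomy's five-element convention: [initial values, proposal sigmas, fixed values, lower bounds, upper bounds], with one dict per model profile. A minimal sketch for a single SIS lens (the numerical values are illustrative only):

kwargs_lens_init = [{'theta_E': 1.2, 'center_x': 0.0, 'center_y': 0.0}]
kwargs_lens_sigma = [{'theta_E': 0.1, 'center_x': 0.05, 'center_y': 0.05}]
kwargs_lens_fixed = [{}]  # nothing held fixed for this profile
kwargs_lens_lower = [{'theta_E': 0.1, 'center_x': -1.0, 'center_y': -1.0}]
kwargs_lens_upper = [{'theta_E': 3.0, 'center_x': 1.0, 'center_y': 1.0}]
lens_params = [kwargs_lens_init, kwargs_lens_sigma, kwargs_lens_fixed,
               kwargs_lens_lower, kwargs_lens_upper]
kwargs_params = {'lens_model': lens_params}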
Example 25
def main():
    args = script_utils.parse_inference_args()
    test_cfg = TestConfig.from_file(args.test_config_file_path)
    baobab_cfg = BaobabConfig.from_file(test_cfg.data.test_baobab_cfg_path)
    cfg = TrainValConfig.from_file(test_cfg.train_val_config_file_path)
    # Set device and default data type
    device = torch.device(test_cfg.device_type)
    if device.type == 'cuda':
        torch.set_default_tensor_type('torch.cuda.' + cfg.data.float_type)
    else:
        torch.set_default_tensor_type('torch.' + cfg.data.float_type)
    script_utils.seed_everything(test_cfg.global_seed)

    ############
    # Data I/O #
    ############
    train_data = XYData(
        is_train=True,
        Y_cols=cfg.data.Y_cols,
        float_type=cfg.data.float_type,
        define_src_pos_wrt_lens=cfg.data.define_src_pos_wrt_lens,
        rescale_pixels=cfg.data.rescale_pixels,
        rescale_pixels_type=cfg.data.rescale_pixels_type,
        log_pixels=cfg.data.log_pixels,
        add_pixel_noise=cfg.data.add_pixel_noise,
        eff_exposure_time=cfg.data.eff_exposure_time,
        train_Y_mean=None,
        train_Y_std=None,
        train_baobab_cfg_path=cfg.data.train_baobab_cfg_path,
        val_baobab_cfg_path=None,
        for_cosmology=False)
    # Define val data and loader
    test_data = XYData(
        is_train=False,
        Y_cols=cfg.data.Y_cols,
        float_type=cfg.data.float_type,
        define_src_pos_wrt_lens=cfg.data.define_src_pos_wrt_lens,
        rescale_pixels=cfg.data.rescale_pixels,
        rescale_pixels_type=cfg.data.rescale_pixels_type,
        log_pixels=cfg.data.log_pixels,
        add_pixel_noise=cfg.data.add_pixel_noise,
        eff_exposure_time=cfg.data.eff_exposure_time,
        train_Y_mean=train_data.train_Y_mean,
        train_Y_std=train_data.train_Y_std,
        train_baobab_cfg_path=cfg.data.train_baobab_cfg_path,
        val_baobab_cfg_path=test_cfg.data.test_baobab_cfg_path,
        for_cosmology=True)
    master_truth = test_data.Y_df
    master_truth = metadata_utils.add_qphi_columns(master_truth)
    master_truth = metadata_utils.add_gamma_psi_ext_columns(master_truth)
    # Figure out how many lenses BNN will predict on (must be consecutive)
    if test_cfg.data.lens_indices is None:
        if args.lens_indices_path is None:
            # Test on all n_test lenses in the test set
            n_test = test_cfg.data.n_test
            lens_range = range(n_test)
        else:
            # Test on the lens indices in a text file at the specified path
            lens_range = []
            with open(args.lens_indices_path, "r") as f:
                for line in f:
                    lens_range.append(int(line.strip()))
            n_test = len(lens_range)
            print("Performing H0 inference on {:d} specified lenses...".format(
                n_test))
    else:
        if args.lens_indices_path is None:
            # Test on the lens indices specified in the test config file
            lens_range = test_cfg.data.lens_indices
            n_test = len(lens_range)
            print("Performing H0 inference on {:d} specified lenses...".format(
                n_test))
        else:
            raise ValueError(
                "Specific lens indices were specified in both the test config file and the command-line argument."
            )
    batch_size = max(lens_range) + 1
    test_loader = DataLoader(test_data,
                             batch_size=batch_size,
                             shuffle=False,
                             drop_last=True)
    # Output directory into which the H0 histograms and H0 samples will be saved
    out_dir = test_cfg.out_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print("Destination folder path: {:s}".format(out_dir))
    else:
        raise OSError("Destination folder already exists.")

    #####################
    # Parameter penalty #
    #####################
    # Instantiate original loss function with all BNN-predicted params
    orig_Y_cols = cfg.data.Y_cols
    loss_fn = getattr(h0rton.losses,
                      cfg.model.likelihood_class)(Y_dim=test_data.Y_dim,
                                                  device=device)
    # Not all predicted params will be sampled via MCMC
    params_to_remove = []  #'lens_light_R_sersic', 'src_light_R_sersic']
    mcmc_Y_cols = [col for col in orig_Y_cols if col not in params_to_remove]
    mcmc_Y_dim = len(mcmc_Y_cols)
    # Instantiate loss function with just the MCMC params
    mcmc_loss_fn = getattr(h0rton.losses, cfg.model.likelihood_class)(
        Y_dim=test_data.Y_dim - len(params_to_remove), device=device)
    remove_param_idx, remove_idx = mcmc_utils.get_idx_for_params(
        mcmc_loss_fn.out_dim, orig_Y_cols, params_to_remove,
        cfg.model.likelihood_class)
    mcmc_train_Y_mean = np.delete(train_data.train_Y_mean, remove_param_idx)
    mcmc_train_Y_std = np.delete(train_data.train_Y_std, remove_param_idx)
    parameter_penalty = mcmc_utils.HybridBNNPenalty(
        mcmc_Y_cols, cfg.model.likelihood_class, mcmc_train_Y_mean,
        mcmc_train_Y_std, test_cfg.h0_posterior.exclude_velocity_dispersion,
        device)
    custom_logL_addition = parameter_penalty.evaluate
    null_spread = False

    ###################
    # BNN predictions #
    ###################
    # Instantiate BNN model
    net = getattr(h0rton.models,
                  cfg.model.architecture)(num_classes=loss_fn.out_dim,
                                          dropout_rate=cfg.model.dropout_rate)
    net.to(device)
    # Load trained weights from saved state
    net, epoch = train_utils.load_state_dict_test(test_cfg.state_dict_path,
                                                  net, cfg.optim.n_epochs,
                                                  device)
    # When only generating BNN predictions (and not running MCMC), we can afford more n_dropout
    # otherwise, we fix n_dropout = mcmc_Y_dim + 1
    if test_cfg.export.pred:
        n_dropout = 20
        n_samples_per_dropout = test_cfg.numerics.mcmc.walkerRatio
    else:
        n_walkers = test_cfg.numerics.mcmc.walkerRatio * (
            mcmc_Y_dim + 1)  # (BNN params + D_dt) times walker ratio
        n_dropout = n_walkers // test_cfg.numerics.mcmc.walkerRatio
        n_samples_per_dropout = test_cfg.numerics.mcmc.walkerRatio
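        # for instance, with walkerRatio = 10 and mcmc_Y_dim = 8 this gives
        # n_walkers = 10 * (8 + 1) = 90, n_dropout = 90 // 10 = 9 dropout
        # passes, and 10 BNN samples drawn per dropout pass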
    # Initialize arrays that will store samples and BNN predictions
    init_pos = np.empty(
        [batch_size, n_dropout, n_samples_per_dropout, mcmc_Y_dim])
    mcmc_pred = np.empty([batch_size, n_dropout, mcmc_loss_fn.out_dim])
    with torch.no_grad():
        net.train()
        # Send some empty forward passes through the test data without backprop to adjust batchnorm weights
        # (This is often not necessary. Beware if using for just 1 lens.)
        for nograd_pass in range(5):
            for X_, Y_ in test_loader:
                X = X_.to(device)
                _ = net(X)
        # Obtain MC dropout samples
        for d in range(n_dropout):
            net.eval()
            for X_, Y_ in test_loader:
                X = X_.to(device)
                Y = Y_.to(device)
                pred = net(X)
                break
            mcmc_pred_d = pred.cpu().numpy()
            # Replace BNN posterior's primary gaussian mean with truth values
            if test_cfg.lens_posterior_type == 'default_with_truth_mean':
                mcmc_pred_d[:, :len(mcmc_Y_cols)] = \
                    Y[:, :len(mcmc_Y_cols)].cpu().numpy()
            # Leave only the MCMC parameters in pred
            mcmc_pred_d = mcmc_utils.remove_parameters_from_pred(
                mcmc_pred_d, remove_idx, return_as_tensor=False)
            # Populate pred that will define the MCMC penalty function
            mcmc_pred[:, d, :] = mcmc_pred_d
            # Instantiate posterior to generate BNN samples, which will serve as initial positions for walkers
            bnn_post = getattr(h0rton.h0_inference.gaussian_bnn_posterior_cpu,
                               loss_fn.posterior_name + 'CPU')(
                                   mcmc_Y_dim, mcmc_train_Y_mean,
                                   mcmc_train_Y_std)
            bnn_post.set_sliced_pred(mcmc_pred_d)
            init_pos[:, d, :, :] = bnn_post.sample(
                n_samples_per_dropout, sample_seed=test_cfg.global_seed +
                d)  # contains just the lens model params, no D_dt
            gc.collect()
    # Terminate right after generating BNN predictions (no MCMC)
    if test_cfg.export.pred:
        import sys
        samples_path = os.path.join(out_dir, 'samples.npy')
        np.save(samples_path, init_pos)
        sys.exit()

    #############
    # MCMC loop #
    #############
    # Convolve MC dropout iterates with aleatoric samples
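    # shape flow: [batch_size, n_dropout, n_samples_per_dropout, mcmc_Y_dim]
    #   -> transpose(0, 3, 1, 2) -> [batch_size, mcmc_Y_dim, n_dropout, n_samples_per_dropout]
    #   -> reshape               -> [batch_size, mcmc_Y_dim, n_dropout * n_samples_per_dropout]
    #   -> transpose(0, 2, 1)    -> [batch_size, n_walkers, mcmc_Y_dim], matching init_D_dt below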
    init_pos = init_pos.transpose(0, 3, 1, 2).reshape(
        [batch_size, mcmc_Y_dim,
         -1]).transpose(0, 2, 1)  # [batch_size, n_samples, mcmc_Y_dim]
    init_D_dt = np.random.uniform(0.0,
                                  15000.0,
                                  size=(batch_size, n_walkers, 1))
    pred_mean = np.mean(init_pos, axis=1)  # [batch_size, mcmc_Y_dim]
    # Define assumed model profiles
    kwargs_model = dict(lens_model_list=['PEMD', 'SHEAR'],
                        point_source_model_list=['SOURCE_POSITION'],
                        source_light_model_list=['SERSIC_ELLIPSE'])
    astro_sig = test_cfg.image_position_likelihood.sigma  # astrometric uncertainty
    # Get H0 samples for each system
    if not test_cfg.time_delay_likelihood.baobab_time_delays:
        if 'abcd_ordering_i' not in master_truth:
            raise ValueError(
                "If the time delay measurements were not generated using Baobab, the user must specify the order of image positions in which the time delays are listed, in order of increasing dec."
            )
    kwargs_lens_eqn_solver = {
        'min_distance': 0.05,
        'search_window':
            baobab_cfg.instrument['pixel_scale'] * baobab_cfg.image['num_pix'],
        'num_iter_max': 200
    }

    total_progress = tqdm(total=n_test)
    realized_time_delays = pd.read_csv(
        test_cfg.error_model.realized_time_delays, index_col=None)
    # For each lens system...
    for i, lens_i in enumerate(lens_range):
        # Each lens gets a unique random state for time delay measurement error realizations.
        #rs_lens = np.random.RandomState(lens_i) # replaced with externally rendered time delays
        ###########################
        # Relevant data and prior #
        ###########################
        data_i = master_truth.iloc[lens_i].copy()
        # Set BNN pred defining parameter penalty for this lens, batch processes across n_dropout
        parameter_penalty.set_bnn_post_params(mcmc_pred[lens_i, :, :])
        # Initialize lens model params walkers at the predictive mean
        init_info = dict(
            zip(mcmc_Y_cols,
                pred_mean[lens_i, :] * mcmc_train_Y_std + mcmc_train_Y_mean))
        lcdm = LCDM(z_lens=data_i['z_lens'],
                    z_source=data_i['z_src'],
                    flat=True)
        true_img_dec = literal_eval(data_i['y_image'])
        n_img = len(true_img_dec)
        measured_td_sig = test_cfg.time_delay_likelihood.sigma
        measured_td_wrt0 = np.array(
            literal_eval(
                realized_time_delays.iloc[lens_i]['measured_td_wrt0']))
        kwargs_data_joint = dict(
            time_delays_measured=measured_td_wrt0,
            time_delays_uncertainties=measured_td_sig,
        )

        #############################
        # Parameter init and bounds #
        #############################
        lens_kwargs = mcmc_utils.get_lens_kwargs(init_info,
                                                 null_spread=null_spread)
        ps_kwargs = mcmc_utils.get_ps_kwargs_src_plane(init_info, astro_sig)
        src_light_kwargs = mcmc_utils.get_light_kwargs(
            init_info['src_light_R_sersic'], null_spread=null_spread)
        special_kwargs = mcmc_utils.get_special_kwargs(
            n_img, astro_sig
        )  # image position offset and time delay distance, aka the "special" parameters
        kwargs_params = {
            'lens_model': lens_kwargs,
            'point_source_model': ps_kwargs,
            'source_model': src_light_kwargs,
            'special': special_kwargs,
        }
        if test_cfg.numerics.solver_type == 'NONE':
            solver_type = 'NONE'
        else:
            solver_type = 'PROFILE_SHEAR' if n_img == 4 else 'CENTER'
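        # quads (n_img == 4) give enough constraints for the 'PROFILE_SHEAR'
        # solver, which adjusts lens profile and external shear parameters to
        # match the image positions; doubles only pin down the lens centre ('CENTER')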
        #solver_type = 'NONE'
        kwargs_constraints = {
            'num_point_source_list': [n_img],
            'Ddt_sampling': True,
            'solver_type': solver_type,
        }

        kwargs_likelihood = {
            'time_delay_likelihood': True,
            'sort_images_by_dec': True,
            'prior_lens': [],
            'prior_special': [],
            'check_bounds': True,
            'check_matched_source_position': False,
            'source_position_tolerance': 0.01,
            'source_position_sigma': 0.01,
            'source_position_likelihood': False,
            'custom_logL_addition': custom_logL_addition,
            'kwargs_lens_eqn_solver': kwargs_lens_eqn_solver
        }

        ###########################
        # MCMC posterior sampling #
        ###########################
        fitting_seq = FittingSequence(kwargs_data_joint,
                                      kwargs_model,
                                      kwargs_constraints,
                                      kwargs_likelihood,
                                      kwargs_params,
                                      verbose=False,
                                      mpi=False)
        if i == 0:
            param_class = fitting_seq._updateManager.param_class
            n_params, param_class_Y_cols = param_class.num_param()
            init_pos = mcmc_utils.reorder_to_param_class(
                mcmc_Y_cols, param_class_Y_cols, init_pos, init_D_dt)
        # MCMC sample from the post-processed BNN posterior jointly with cosmology
        lens_i_start_time = time.time()
        if test_cfg.lens_posterior_type == 'default':
            test_cfg.numerics.mcmc.update(init_samples=init_pos[lens_i, :, :])
        fitting_kwargs_list_mcmc = [['MCMC', test_cfg.numerics.mcmc]]
        #try:
        with script_utils.HiddenPrints():
            chain_list_mcmc = fitting_seq.fit_sequence(
                fitting_kwargs_list_mcmc)
            kwargs_result_mcmc = fitting_seq.best_fit()
        lens_i_end_time = time.time()
        inference_time = (lens_i_end_time - lens_i_start_time) / 60.0  # min

        #############################
        # Plotting the MCMC samples #
        #############################
        # sampler_type : 'EMCEE'
        # samples_mcmc : np.array of shape `[n_mcmc_eval, n_params]`
        # param_mcmc : list of str of length n_params, the parameter names
        sampler_type, samples_mcmc, param_mcmc, _ = chain_list_mcmc[0]
        new_samples_mcmc = mcmc_utils.postprocess_mcmc_chain(
            kwargs_result_mcmc, samples_mcmc, kwargs_model, lens_kwargs[2],
            ps_kwargs[2], src_light_kwargs[2], special_kwargs[2],
            kwargs_constraints)
        # Plot D_dt histogram
        D_dt_samples = new_samples_mcmc['D_dt'].values
        true_D_dt = lcdm.D_dt(H_0=data_i['H0'], Om0=0.3)
        data_i['D_dt'] = true_D_dt
        # Export D_dt samples for this lens
        lens_inference_dict = dict(
            D_dt_samples=D_dt_samples,  # kappa_ext=0 for these samples
            inference_time=inference_time,
            true_D_dt=true_D_dt,
        )
        lens_inference_dict_save_path = os.path.join(
            out_dir, 'D_dt_dict_{0:04d}.npy'.format(lens_i))
        np.save(lens_inference_dict_save_path, lens_inference_dict)
        # Optionally export the MCMC samples
        if test_cfg.export.mcmc_samples:
            mcmc_samples_path = os.path.join(
                out_dir, 'mcmc_samples_{0:04d}.csv'.format(lens_i))
            new_samples_mcmc.to_csv(mcmc_samples_path, index=None)
        # Optionally export the D_dt histogram
        if test_cfg.export.D_dt_histogram:
            cleaned_D_dt_samples = h0_utils.remove_outliers_from_lognormal(
                D_dt_samples, 3)
            _ = plotting_utils.plot_D_dt_histogram(cleaned_D_dt_samples,
                                                   lens_i,
                                                   true_D_dt,
                                                   save_dir=out_dir)
        # Optionally export the plot of MCMC chain
        if test_cfg.export.mcmc_chain:
            mcmc_chain_path = os.path.join(
                out_dir, 'mcmc_chain_{0:04d}.png'.format(lens_i))
            plotting_utils.plot_mcmc_chain(chain_list_mcmc, mcmc_chain_path)
        # Optionally export posterior cornerplot of select lens model parameters with D_dt
        if test_cfg.export.mcmc_corner:
            mcmc_corner_path = os.path.join(
                out_dir, 'mcmc_corner_{0:04d}.png'.format(lens_i))
            plotting_utils.plot_mcmc_corner(
                new_samples_mcmc[test_cfg.export.mcmc_cols],
                data_i[test_cfg.export.mcmc_cols],
                test_cfg.export.mcmc_col_labels, mcmc_corner_path)
        total_progress.update(1)
        gc.collect()
    realized_time_delays.to_csv(os.path.join(out_dir,
                                             'realized_time_delays.csv'),
                                index=None)
    total_progress.close()
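
A minimal sketch (directory name hypothetical, standing in for `test_cfg.out_dir` above) of how the per-lens dictionaries saved with np.save could be read back; since each file stores a Python dict, allow_pickle=True and .item() are needed:

import glob
import os

import numpy as np

out_dir = './h0_inference_output'  # hypothetical output directory
for path in sorted(glob.glob(os.path.join(out_dir, 'D_dt_dict_*.npy'))):
    lens_dict = np.load(path, allow_pickle=True).item()
    print('%s: true D_dt = %.1f, posterior median = %.1f (%.1f min of MCMC)'
          % (os.path.basename(path), lens_dict['true_D_dt'],
             np.median(lens_dict['D_dt_samples']), lens_dict['inference_time']))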
Example 26
    def swim(self, lens_name, model_id, log=True, mpi=False,
             recipe_name='default', sampler='EMCEE', thread_count=1):
        """
        Run models for a single lens.

        :param lens_name: lens name
        :type lens_name: `str`
        :param model_id: identifier for the model run
        :type model_id: `str`
        :param log: if `True`, all `print` statements will be logged
        :type log: `bool`
        :param mpi: MPI option
        :type mpi: `bool`
        :param recipe_name: recipe for pre-sampling optimization; currently
            supported: 'default' and 'galaxy-galaxy'
        :type recipe_name: `str`
        :param sampler: 'EMCEE' or 'COSMOHAMMER'; 'COSMOHAMMER' is kept only
            for legacy support
        :type sampler: `str`
        :param thread_count: number of threads if `multiprocess` is used
        :type thread_count: `int`
        :return:
        :rtype:
        """
        pool = choose_pool(mpi=mpi)

        if log and pool.is_master():
            log_file = open(self.file_system.get_log_file_path(lens_name,
                                                               model_id), 'wt')
            sys.stdout = log_file

        config = self.get_lens_config(lens_name)
        recipe = Recipe(config, sampler=sampler, thread_count=thread_count)

        psf_supersampling_factor = config.get_psf_supersampled_factor()
        kwargs_data_joint = self.get_kwargs_data_joint(
            lens_name,
            psf_supersampled_factor=psf_supersampling_factor)

        fitting_sequence = FittingSequence(
            kwargs_data_joint,
            config.get_kwargs_model(),
            config.get_kwargs_constraints(),
            config.get_kwargs_likelihood(),
            config.get_kwargs_params(),
            mpi=mpi
        )

        fitting_kwargs_list = recipe.get_recipe(
                                    kwargs_data_joint=kwargs_data_joint,
                                    recipe_name=recipe_name)
        fit_output = fitting_sequence.fit_sequence(fitting_kwargs_list)
        kwargs_result = fitting_sequence.best_fit(bijective=False)

        output = {
            'settings': config.settings,
            'kwargs_result': kwargs_result,
            'fit_output': fit_output,
        }

        if pool.is_master():
            self.file_system.save_output(lens_name, model_id, output)

        if log and pool.is_master():
            log_file.close()
Example 27
def main():
    args = parse_args()
    test_cfg = TestConfig.from_file(args.test_config_file_path)
    train_val_cfg = TrainValConfig.from_file(
        test_cfg.train_val_config_file_path)
    # Set device and default data type
    device = torch.device(test_cfg.device_type)
    if device.type == 'cuda':
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')
    seed_everything(test_cfg.global_seed)

    ############
    # Data I/O #
    ############
    test_data = TDLMCData(data_cfg=train_val_cfg.data, rung_i=args.rung_idx)
    master_truth = test_data.cosmo_df
    if test_cfg.data.lens_indices is None:
        if args.lens_indices_path is None:
            # Test on all n_test lenses in the test set
            n_test = test_cfg.data.n_test
            lens_range = range(n_test)
        else:
            # Test on the lens indices in a text file at the specified path
            lens_range = []
            with open(args.lens_indices_path, "r") as f:
                for line in f:
                    lens_range.append(int(line.strip()))
            n_test = len(lens_range)
            print("Performing H0 inference on {:d} specified lenses...".format(
                n_test))
    else:
        if args.lens_indices_path is None:
            # Test on the lens indices specified in the test config file
            lens_range = test_cfg.data.lens_indices
            n_test = len(lens_range)
            print("Performing H0 inference on {:d} specified lenses...".format(
                n_test))
        else:
            raise ValueError(
                "Specific lens indices were specified in both the test config file and the command-line argument."
            )
    batch_size = max(lens_range) + 1
    test_loader = DataLoader(test_data,
                             batch_size=batch_size,
                             shuffle=False,
                             drop_last=True)
    # Output directory into which the H0 histograms and H0 samples will be saved
    out_dir = test_cfg.out_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print("Destination folder path: {:s}".format(out_dir))
    else:
        raise OSError("Destination folder already exists.")

    ######################
    # Load trained state #
    ######################
    # Instantiate loss function, to append to the MCMC objective as the prior
    orig_Y_cols = train_val_cfg.data.Y_cols
    loss_fn = getattr(h0rton.losses, train_val_cfg.model.likelihood_class)(
        Y_dim=train_val_cfg.data.Y_dim, device=device)
    # Instantiate MCMC parameter penalty function
    params_to_remove = ['lens_light_R_sersic']  #, 'src_light_R_sersic']
    mcmc_Y_cols = [col for col in orig_Y_cols if col not in params_to_remove]
    mcmc_Y_dim = len(mcmc_Y_cols)
    mcmc_loss_fn = getattr(
        h0rton.losses, train_val_cfg.model.likelihood_class)(
            Y_dim=train_val_cfg.data.Y_dim - len(params_to_remove),
            device=device)
    remove_param_idx, remove_idx = mcmc_utils.get_idx_for_params(
        mcmc_loss_fn.out_dim, orig_Y_cols, params_to_remove,
        train_val_cfg.model.likelihood_class)
    mcmc_train_Y_mean = np.delete(train_val_cfg.data.train_Y_mean,
                                  remove_param_idx)
    mcmc_train_Y_std = np.delete(train_val_cfg.data.train_Y_std,
                                 remove_param_idx)
    parameter_penalty = mcmc_utils.HybridBNNPenalty(
        mcmc_Y_cols, train_val_cfg.model.likelihood_class, mcmc_train_Y_mean,
        mcmc_train_Y_std, test_cfg.h0_posterior.exclude_velocity_dispersion,
        device)
    custom_logL_addition = (
        parameter_penalty.evaluate
        if test_cfg.lens_posterior_type.startswith('default') else None)
    null_spread = (test_cfg.lens_posterior_type == 'truth')
    # Instantiate model
    net = getattr(
        h0rton.models,
        train_val_cfg.model.architecture)(num_classes=loss_fn.out_dim)
    net.to(device)
    # Load trained weights from saved state
    net, epoch = train_utils.load_state_dict_test(test_cfg.state_dict_path,
                                                  net,
                                                  train_val_cfg.optim.n_epochs,
                                                  device)
    with torch.no_grad():
        net.eval()
        for X_ in test_loader:
            X = X_.to(device)
            pred = net(X)
            break

    mcmc_pred = pred.cpu().numpy()
    mcmc_pred = mcmc_utils.remove_parameters_from_pred(mcmc_pred,
                                                       remove_idx,
                                                       return_as_tensor=False)

    # Instantiate posterior for BNN samples, to initialize the walkers
    bnn_post = getattr(h0rton.h0_inference.gaussian_bnn_posterior,
                       loss_fn.posterior_name)(mcmc_Y_dim, device,
                                               mcmc_train_Y_mean,
                                               mcmc_train_Y_std)
    bnn_post.set_sliced_pred(torch.tensor(mcmc_pred))
    n_walkers = test_cfg.numerics.mcmc.walkerRatio * (
        mcmc_Y_dim + 1)  # BNN params + H0 times walker ratio
    init_pos = bnn_post.sample(
        n_walkers, sample_seed=test_cfg.global_seed
    )  # [batch_size, n_walkers, mcmc_Y_dim] contains just the lens model params, no D_dt
    init_D_dt = np.random.uniform(0.0,
                                  10000.0,
                                  size=(batch_size, n_walkers,
                                        1))  # FIXME: init H0 hardcoded

    kwargs_model = dict(lens_model_list=['PEMD', 'SHEAR'],
                        point_source_model_list=['SOURCE_POSITION'],
                        source_light_model_list=['SERSIC_ELLIPSE'])
    astro_sig = test_cfg.image_position_likelihood.sigma
    # Get H0 samples for each system
    if not test_cfg.time_delay_likelihood.baobab_time_delays:
        if 'abcd_ordering_i' not in master_truth:
            raise ValueError(
                "If the time delay measurements were not generated using Baobab, the user must specify the order of image positions in which the time delays are listed, in order of increasing dec."
            )

    lenses_skipped = []  # keeps track of lenses that skipped MCMC
    total_progress = tqdm(total=n_test)
    # For each lens system...
    for i, lens_i in enumerate(lens_range):
        # Each lens gets a unique random state for td and vd measurement error realizations.
        rs_lens = np.random.RandomState(lens_i)
        ###########################
        # Relevant data and prior #
        ###########################
        data_i = master_truth.iloc[lens_i].copy()
        parameter_penalty.set_bnn_post_params(
            mcmc_pred[lens_i, :])  # set the BNN parameters
        # Init values for the lens model params
        if test_cfg.lens_posterior_type == 'default':
            init_info = dict(
                zip(
                    mcmc_Y_cols,
                    mcmc_pred[lens_i, :len(mcmc_Y_cols)] * mcmc_train_Y_std +
                    mcmc_train_Y_mean))  # mean of primary Gaussian
        else:  # types 'hybrid_with_truth_mean' and 'truth'
            init_info = dict(zip(mcmc_Y_cols,
                                 data_i[mcmc_Y_cols].values))  # truth params
        if not test_cfg.h0_posterior.exclude_velocity_dispersion:
            parameter_penalty.set_vel_disp_params()
            raise NotImplementedError
        lcdm = LCDM(z_lens=data_i['z_lens'],
                    z_source=data_i['z_src'],
                    flat=True)
        # The measured time delays are B, C, D relative to A under the given ABCD
        # ordering, so the inferred time delays must follow the same convention.
        measured_td_wrt0 = np.array(data_i['measured_td'])  # [n_img - 1,]
        measured_td_sig = np.array(data_i['measured_td_err'])  # [n_img - 1,]
        abcd_ordering_i = np.array(data_i['abcd_ordering_i'])
        n_img = len(abcd_ordering_i)
        kwargs_data_joint = dict(
            time_delays_measured=measured_td_wrt0,
            time_delays_uncertainties=measured_td_sig,
            abcd_ordering_i=abcd_ordering_i,
            #vel_disp_measured=measured_vd, # TODO: optionally exclude
            #vel_disp_uncertainty=vel_disp_sig,
        )
        if not test_cfg.h0_posterior.exclude_velocity_dispersion:
            measured_vd = data_i['true_vd'] * (
                1.0 + rs_lens.randn() *
                test_cfg.error_model.velocity_dispersion_frac_error)
            kwargs_data_joint['vel_disp_measured'] = measured_vd
            kwargs_data_joint[
                'vel_disp_sig'] = test_cfg.velocity_dispersion_likelihood.sigma

        #############################
        # Parameter init and bounds #
        #############################
        lens_kwargs = mcmc_utils.get_lens_kwargs(init_info,
                                                 null_spread=null_spread)
        ps_kwargs = mcmc_utils.get_ps_kwargs_src_plane(init_info,
                                                       astro_sig,
                                                       null_spread=null_spread)
        src_light_kwargs = mcmc_utils.get_light_kwargs(
            init_info['src_light_R_sersic'], null_spread=null_spread)
        special_kwargs = mcmc_utils.get_special_kwargs(
            n_img, astro_sig, null_spread=null_spread
        )  # image position offset and time delay distance, aka the "special" parameters
        kwargs_params = {
            'lens_model': lens_kwargs,
            'point_source_model': ps_kwargs,
            'source_model': src_light_kwargs,
            'special': special_kwargs,
        }
        if test_cfg.numerics.solver_type == 'NONE':
            solver_type = 'NONE'
        else:
            solver_type = 'PROFILE_SHEAR' if n_img == 4 else 'CENTER'
        #solver_type = 'NONE'
        kwargs_constraints = {
            'num_point_source_list': [n_img],
            'Ddt_sampling': True,
            'solver_type': solver_type,
        }

        kwargs_likelihood = {
            'time_delay_likelihood': True,
            'sort_images_by_dec': True,
            'prior_lens': [],
            'prior_special': [],
            'check_bounds': True,
            'check_matched_source_position': False,
            'source_position_tolerance': 0.01,
            'source_position_sigma': 0.01,
            'source_position_likelihood': False,
            'custom_logL_addition': custom_logL_addition,
        }

        ###########################
        # MCMC posterior sampling #
        ###########################
        fitting_seq = FittingSequence(kwargs_data_joint,
                                      kwargs_model,
                                      kwargs_constraints,
                                      kwargs_likelihood,
                                      kwargs_params,
                                      verbose=False,
                                      mpi=False)
        if i == 0:
            param_class = fitting_seq._updateManager.param_class
            n_params, param_class_Y_cols = param_class.num_param()
            init_pos = mcmc_utils.reorder_to_param_class(
                mcmc_Y_cols, param_class_Y_cols, init_pos, init_D_dt)
        # MCMC sample from the post-processed BNN posterior jointly with cosmology
        lens_i_start_time = time.time()
        if test_cfg.lens_posterior_type == 'default':
            test_cfg.numerics.mcmc.update(init_samples=init_pos[lens_i, :, :])
        fitting_kwargs_list_mcmc = [['MCMC', test_cfg.numerics.mcmc]]
        #with HiddenPrints():
        try:
            chain_list_mcmc = fitting_seq.fit_sequence(
                fitting_kwargs_list_mcmc)
            kwargs_result_mcmc = fitting_seq.best_fit()
        except Exception:
            print("lens {:d} skipped".format(lens_i))
            total_progress.update(1)
            lenses_skipped.append(lens_i)
            continue
        lens_i_end_time = time.time()
        inference_time = (lens_i_end_time - lens_i_start_time) / 60.0  # min

        #############################
        # Plotting the MCMC samples #
        #############################
        # sampler_type : 'EMCEE'
        # samples_mcmc : np.array of shape `[n_mcmc_eval, n_params]`
        # param_mcmc : list of str of length n_params, the parameter names
        sampler_type, samples_mcmc, param_mcmc, _ = chain_list_mcmc[0]
        new_samples_mcmc = mcmc_utils.postprocess_mcmc_chain(
            kwargs_result_mcmc, samples_mcmc, kwargs_model, lens_kwargs[2],
            ps_kwargs[2], src_light_kwargs[2], special_kwargs[2],
            kwargs_constraints)
        # Plot D_dt histogram
        D_dt_samples = new_samples_mcmc['D_dt'].values
        true_D_dt = lcdm.D_dt(H_0=data_i['H0'], Om0=0.27)
        data_i['D_dt'] = true_D_dt
        # Export D_dt samples for this lens
        lens_inference_dict = dict(
            D_dt_samples=D_dt_samples,  # kappa_ext=0 for these samples
            inference_time=inference_time,
            true_D_dt=true_D_dt,
        )
        lens_inference_dict_save_path = os.path.join(
            out_dir, 'D_dt_dict_{0:04d}.npy'.format(lens_i))
        np.save(lens_inference_dict_save_path, lens_inference_dict)
        # Optionally export the MCMC samples
        if test_cfg.export.mcmc_samples:
            mcmc_samples_path = os.path.join(
                out_dir, 'mcmc_samples_{0:04d}.csv'.format(lens_i))
            new_samples_mcmc.to_csv(mcmc_samples_path, index=None)
        # Optionally export the D_dt histogram
        if test_cfg.export.D_dt_histogram:
            cleaned_D_dt_samples = h0_utils.remove_outliers_from_lognormal(
                D_dt_samples, 3)
            _ = plotting_utils.plot_D_dt_histogram(cleaned_D_dt_samples,
                                                   lens_i,
                                                   true_D_dt,
                                                   save_dir=out_dir)
        # Optionally export the plot of MCMC chain
        if test_cfg.export.mcmc_chain:
            mcmc_chain_path = os.path.join(
                out_dir, 'mcmc_chain_{0:04d}.png'.format(lens_i))
            plotting_utils.plot_mcmc_chain(chain_list_mcmc, mcmc_chain_path)
        # Optionally export posterior cornerplot of select lens model parameters with D_dt
        if test_cfg.export.mcmc_corner:
            mcmc_corner_path = os.path.join(
                out_dir, 'mcmc_corner_{0:04d}.png'.format(lens_i))
            plotting_utils.plot_mcmc_corner(
                new_samples_mcmc[test_cfg.export.mcmc_cols], None,
                test_cfg.export.mcmc_col_labels, mcmc_corner_path)
        total_progress.update(1)
    total_progress.close()
Example 28
        'n_sersic': 6,
        'center_x': (x0blob + 4) * deltaPix,
        'center_y': ((60 - y0blob) + 4) * deltaPix
    })

    lens_light_params = [
        kwargs_lens_light_init, kwargs_lens_light_sigma, fixed_lens_light,
        kwargs_lower_lens_light, kwargs_upper_lens_light
    ]

    kwargs_params = {'lens_light_model': lens_light_params}

    from lenstronomy.Workflow.fitting_sequence import FittingSequence

    fitting_seqI = FittingSequence(multi_band_listI, kwargs_modelI,
                                   kwargs_constraints, kwargs_likelihood,
                                   kwargs_params)

    fitting_kwargs_list = [[
        'MCMC', {
            'n_burn': 100,
            'n_run': 100,
            'walkerRatio': 10,
            'sigma_scale': .1
        }
    ]]

    chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seqI.fit_sequence(
        fitting_kwargs_list)
    lens_result, source_result, lens_light_resultI, ps_result, cosmo_result = \
        fitting_seqI.best_fit()


def sl_sys_analysis():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        print(":Registered %d processes" % comm_size)
        args["infile"] = sys.argv[1]
        args["nimgs"] = sys.argv[2]
        args["los"] = sys.argv[3]

    args = comm.bcast(args)
    # Organize division of strong lensing systems across processes
    with open(args["infile"], "r") as myfile:
        limg_data = myfile.read()
    systems = json.loads(limg_data)
    sys_nr_per_proc = int(len(systems) / comm_size)
    start_sys = sys_nr_per_proc * comm_rank
    end_sys = sys_nr_per_proc * (comm_rank + 1)
    with open("../lens_catalogs_sie_only.json", "r") as myfile:
        limg_data = myfile.read()
    systems_prior = json.loads(limg_data)

    if comm_rank == 0:
        print("Each process will have %d systems" % sys_nr_per_proc)
        print("That should take app. %f min." % (sys_nr_per_proc * 20))

    results = {"D_dt": []}
    for ii in range(len(systems))[(start_sys + 2) : end_sys]:
        system = systems[ii]
        system_prior = systems_prior[ii]
        print("Analysing system ID: %d" % system["losID"])

        # the data set is
        z_lens = system_prior["zl"]
        z_source = 2.0

        # image positions units of arcsec
        # time delays units of days
        # image brightness
        # amplitude (in arbitrary linear units, not magnitudes)
        ximg = []
        yimg = []
        delay = []
        image_amps = []
        for jj in range(system["nimgs"]):
            ximg.append(system["ximg"][jj])
            yimg.append(system["yimg"][jj])
            delay.append(system["delay"][jj])
            image_amps.append(system["mags"][jj])
        ximg = np.asarray(ximg)
        yimg = np.asarray(yimg)
        # 1-sigma astrometric uncertainties of the image positions
        # (assuming equal precision)
        astrometry_sigma = 0.004
        delay = np.asarray(delay)
        d_dt = delay[1:] - delay[0]
        # 1-sigma uncertainties in the time-delay measurement (in units of days)
        d_dt_sigma = np.ones(system["nimgs"] - 1) * 2
        image_amps = np.asarray(image_amps)
        image_amps_sigma = np.ones(system["nimgs"]) * 0.3
        flux_ratios = image_amps[1:] - image_amps[0]
        flux_ratio_errors = np.ones(system["nimgs"] - 1) * 0.1

        # lens model choices
        lens_model_list = ["SPEP", "SHEAR"]

        fixed_lens = []
        kwargs_lens_init = []
        kwargs_lens_sigma = []
        kwargs_lower_lens = []
        kwargs_upper_lens = []

        fixed_lens.append({})
        kwargs_lens_init.append(
            {
                "theta_E": 1.0,
                "gamma": 2,
                "center_x": 0,
                "center_y": 0,
                "e1": 0,
                "e2": 0.0,
            }
        )
        kwargs_lens_sigma.append(
            {
                "theta_E": 0.2,
                "e1": 0.1,
                "e2": 0.1,
                "gamma": 0.1,
                "center_x": 0.1,
                "center_y": 0.1,
            }
        )
        kwargs_lower_lens.append(
            {
                "theta_E": 0.01,
                "e1": -0.5,
                "e2": -0.5,
                "gamma": 1.5,
                "center_x": -10,
                "center_y": -10,
            }
        )
        kwargs_upper_lens.append(
            {
                "theta_E": 10,
                "e1": 0.5,
                "e2": 0.5,
                "gamma": 2.5,
                "center_x": 10,
                "center_y": 10,
            }
        )

        fixed_lens.append({"ra_0": 0, "dec_0": 0})
        kwargs_lens_init.append({"e1": 0.0, "e2": 0.0})
        kwargs_lens_sigma.append({"e1": 0.1, "e2": 0.1})
        kwargs_lower_lens.append({"e1": -0.2, "e2": -0.2})
        kwargs_upper_lens.append({"e1": 0.2, "e2": 0.2})
        lens_params = [
            kwargs_lens_init,
            kwargs_lens_sigma,
            fixed_lens,
            kwargs_lower_lens,
            kwargs_upper_lens,
        ]

        point_source_list = ["LENSED_POSITION"]
        fixed_ps = [
            {"ra_image": ximg, "dec_image": yimg}
        ]  # we fix the image position coordinates
        kwargs_ps_init = fixed_ps
        kwargs_ps_sigma = [
            {
                "ra_image": 0.01 * np.ones(len(ximg)),
                "dec_image": 0.01 * np.ones(len(ximg)),
            }
        ]
        kwargs_lower_ps = [
            {
                "ra_image": -10 * np.ones(len(ximg)),
                "dec_image": -10 * np.ones(len(ximg)),
            }
        ]
        kwargs_upper_ps = [
            {"ra_image": 10 * np.ones(len(ximg)), "dec_image": 10 * np.ones(len(ximg))}
        ]

        ps_params = [
            kwargs_ps_init,
            kwargs_ps_sigma,
            fixed_ps,
            kwargs_lower_ps,
            kwargs_upper_ps,
        ]

        # we allow some freedom in how well the modelled image positions match those
        # given by the data ('ra_image', 'dec_image', which are held fixed while fitting)
        fixed_cosmo = {}
        kwargs_cosmo_init = {
            "D_dt": 5000,
            "delta_x_image": np.zeros_like(ximg),
            "delta_y_image": np.zeros_like(ximg),
        }
        kwargs_cosmo_sigma = {
            "D_dt": 10000,
            "delta_x_image": np.ones_like(ximg) * astrometry_sigma,
            "delta_y_image": np.ones_like(ximg) * astrometry_sigma,
        }
        kwargs_lower_cosmo = {
            "D_dt": 0,
            "delta_x_image": np.ones_like(ximg) * (-1),
            "delta_y_image": np.ones_like(ximg) * (-1),
        }
        kwargs_upper_cosmo = {
            "D_dt": 10000,
            "delta_x_image": np.ones_like(ximg) * (1),
            "delta_y_image": np.ones_like(ximg) * (1),
        }
        cosmo_params = [
            kwargs_cosmo_init,
            kwargs_cosmo_sigma,
            fixed_cosmo,
            kwargs_lower_cosmo,
            kwargs_upper_cosmo,
        ]

        kwargs_params = {
            "lens_model": lens_params,
            "point_source_model": ps_params,
            "cosmography": cosmo_params,
        }

        # set up options for likelihood and parameter sampling
        kwargs_constraints = {
            "num_point_source_list": [4],
            # any proposed lens model must reproduce the image positions
            # of the point sources being sampled
            "solver_type": "PROFILE_SHEAR",
            "cosmo_type": "D_dt",  # sampling of the time-delay distance
            # explicit modelling of the astrometric imperfection of
            # the point source positions
            "point_source_offset": True,
        }
        kwargs_likelihood = {
            "check_bounds": True,
            "point_source_likelihood": True,
            "position_uncertainty": astrometry_sigma,
            "check_solver": True,
            "solver_tolerance": 0.001,
            "time_delay_likelihood": True,
            "image_likelihood": False,
            # must be set explicitly when no imaging data are provided
            "flux_ratio_likelihood": True,  # enables the flux ratio likelihood
        }
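        # with 'check_solver' enabled, any proposed model whose lens equation does not
        # recover the image positions to within 'solver_tolerance' (arcsec) is heavily
        # penalized in the likelihood.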
        kwargs_data_joint = {
            "time_delays_measured": d_dt,
            "time_delays_uncertainties": d_dt_sigma,
            "flux_ratios": flux_ratios,
            "flux_ratio_errors": flux_ratio_errors,
        }
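        # since 'image_likelihood' is False, the fit is constrained only by the measured
        # time delays, the flux ratios, and the (fixed) image positions.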
        kwargs_model = {
            "lens_model_list": lens_model_list,
            "point_source_model_list": point_source_list,
        }

        mpi = False  # MPI is possible, but not supported in this notebook.
        fitting_seq = FittingSequence(
            kwargs_data_joint,
            kwargs_model,
            kwargs_constraints,
            kwargs_likelihood,
            kwargs_params,
        )
        fitting_kwargs_list = [
            # ['update_settings', {'lens_add_fixed': [[0, ['gamma']]]}],
            ["PSO", {"sigma_scale": 1.0, "n_particles": 100, "n_iterations": 100}]
        ]
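        # a PSO run is used first to locate the peak of the likelihood; the MCMC step
        # below then starts from (a perturbation of) this best fit.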

        chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(
            fitting_kwargs_list
        )
        lens_result, source_result, lens_light_result, ps_result, cosmo_result = (
            fitting_seq.best_fit()
        )

        # and now we run the MCMC
        fitting_kwargs_list = [
            ["PSO", {"sigma_scale": 0.1, "n_particles": 100, "n_iterations": 100}],
            [
                "MCMC",
                {"n_burn": 200, "n_run": 200, "walkerRatio": 10, "sigma_scale": 0.1},
            ],
        ]
        chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(
            fitting_kwargs_list
        )
        lens_result, source_result, lens_light_result, ps_result, cosmo_result = (
            fitting_seq.best_fit()
        )
        # print("number of non-linear parameters in the MCMC process: ", len(param_mcmc))
        # print("parameters in order: ", param_mcmc)
        print("number of evaluations in the MCMC process: ", np.shape(samples_mcmc)[0])

        param = Param(
            kwargs_model,
            fixed_lens,
            kwargs_fixed_ps=fixed_ps,
            kwargs_fixed_cosmo=fixed_cosmo,
            kwargs_lens_init=lens_result,
            **kwargs_constraints,
        )
        # the number of non-linear parameters and their names #
        num_param, param_list = param.num_param()

        lensAnalysis = LensAnalysis(kwargs_model)

        mcmc_new_list = []
        labels_new = [
            r"$\gamma$",
            r"$\phi_{ext}$",
            r"$\gamma_{ext}$",
            r"$D_{\Delta t}$",
        ]
        for i in range(len(samples_mcmc)):
            # transform the parameter position of the MCMC chain in a
            # lenstronomy convention with keyword arguments #
            kwargs_lens_out, kwargs_light_source_out, kwargs_light_lens_out, kwargs_ps_out, kwargs_cosmo = param.args2kwargs(
                samples_mcmc[i]
            )
            D_dt = kwargs_cosmo["D_dt"]
            gamma = kwargs_lens_out[0]["gamma"]
            phi_ext, gamma_ext = lensAnalysis._lensModelExtensions.external_shear(
                kwargs_lens_out
            )
            mcmc_new_list.append([gamma, phi_ext, gamma_ext, D_dt])

        # plot = corner.corner(mcmc_new_list, labels=labels_new, show_titles=True)
        # and here the predicted angular diameter distance from a
        # default cosmology (beware of experimenter bias!)
        lensCosmo = LensCosmo(z_lens=z_lens, z_source=z_source)
        results["D_dt"].append(D_dt)

    # save the D_dt results of this MPI rank to its own HDF5 file
    hf = h5py.File(
        "H0_"
        + args["los"]
        + "_nimgs"
        + str(args["nimgs"])
        + "_"
        + str(comm_rank)
        + ".hdf5",
        "w",
    )
    hf.create_dataset("D_dt", data=results["D_dt"])
    hf.close()