def test_dypolychord(self):
    fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood, self.kwargs_params)
    fitting_list = []
    kwargs_dypolychord = {
        'sampler_type': 'DYPOLYCHORD',
        'kwargs_run': {
            'ninit': 8,
            'nlive_const': 10,
            # 'seed_increment': 1,
            'resume_dyn_run': False,
            # 'init_step': 10,
        },
        'polychord_settings': {
            'seed': 1,
            # 'num_repeats': 20
        },
        'dypolychord_dynamic_goal': 0.8,  # 1 for posterior-only, 0 for evidence-only
        'remove_output_dir': True,
    }
    fitting_list.append(['nested_sampling', kwargs_dypolychord])
    chain_list = fittingSequence.fit_sequence(fitting_list)
def initialize_sampler(self, walker_ratio, chains_save_path):
    """
    Initialize the sampler to be used by run_samples.

    Parameters:
        walker_ratio (int): The number of walkers per free parameter.
            Must be at least 2.
        chains_save_path (str): An h5 path specifying where to save the
            sampler chains. If a sampler chain is already present in the
            path it will be loaded.
    """
    if self.image_selected is False:
        raise RuntimeError('Select an image before starting your sampler')

    # Set up the fitting sequence and fitting kwargs from lenstronomy
    self.walker_ratio = walker_ratio
    self.chains_save_path = chains_save_path
    ls_kwargs_data_joint = {
        'multi_band_list': self.ls_multi_band_list,
        'multi_band_type': 'multi-linear'
    }
    ls_kwargs_constraints = {}
    self.fitting_seq = FittingSequence(ls_kwargs_data_joint, self.ls_kwargs_model,
                                       ls_kwargs_constraints,
                                       self.ls_kwargs_likelihood,
                                       self.ls_kwargs_params)
    self.sampler_init = True
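# Hedged usage sketch (not from the original source): assuming a fitter object
# that defines initialize_sampler above and has had an image selected first;
# the selection call, walker ratio and h5 path below are illustrative only.
#
#   fitter.select_image(0)  # hypothetical selection step required by the check above
#   fitter.initialize_sampler(walker_ratio=4, chains_save_path='chains.h5')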
def run_mcmc(self, mcmc_numerics):
    """Sample from the joint likelihood"""
    fitting_seq = FittingSequence(self.kwargs_data_joint, self.kwargs_model,
                                  self.kwargs_constraints, self.kwargs_likelihood,
                                  self.kwargs_params)
    fitting_kwargs_list = [['MCMC', mcmc_numerics]]
    # with script_utils.HiddenPrints():
    chain_list_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)
    kwargs_result_mcmc = fitting_seq.best_fit()
    return chain_list_mcmc, kwargs_result_mcmc
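# Hedged usage sketch (not from the original source): `mcmc_numerics` follows
# the emcee-style 'MCMC' keyword convention used elsewhere in this collection;
# the numbers are illustrative and `model` stands in for the owning object.
#
#   mcmc_numerics = {'n_burn': 100, 'n_run': 200, 'walkerRatio': 10, 'sigma_scale': 0.1}
#   chain_list_mcmc, kwargs_result_mcmc = model.run_mcmc(mcmc_numerics)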
def test_fitting_sequence(self):
    kwargs_init = [
        self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light,
        self.kwargs_ps
    ]
    lens_sigma = [{
        'theta_E_sigma': 0.1,
        'gamma_sigma': 0.1,
        'ellipse_sigma': 0.1,
        'center_x_sigma': 0.1,
        'center_y_sigma': 0.1
    }, {
        'shear_sigma': 0.1
    }]
    source_sigma = [{
        'R_sersic_sigma': 0.05,
        'n_sersic_sigma': 0.5,
        'center_x_sigma': 0.1,
        'center_y_sigma': 0.1,
        'ellipse_sigma': 0.1
    }]
    lens_light_sigma = [{
        'R_sersic_sigma': 0.05,
        'n_sersic_sigma': 0.5,
        'center_x_sigma': 0.1,
        'center_y_sigma': 0.1
    }]
    ps_sigma = [{'pos_sigma': 1, 'point_amp_sigma': 1}]
    kwargs_sigma = [lens_sigma, source_sigma, lens_light_sigma, ps_sigma]
    kwargs_fixed = [[{}, {}], [{}], [{}], [{}]]
    kwargs_params = [
        kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_init, kwargs_init
    ]
    image_band = [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]
    multi_band_list = [image_band]
    fittingSequence = FittingSequence(multi_band_list, self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood, kwargs_params)
    lens_temp, source_temp, lens_light_temp, else_temp, chain_list, param_list, \
        samples_mcmc, param_mcmc, dist_mcmc = fittingSequence.fit_sequence(
            fitting_kwargs_list=[])
    npt.assert_almost_equal(lens_temp[0]['theta_E'],
                            self.kwargs_lens[0]['theta_E'], decimal=2)
    n_p = 2
    n_i = 2
def build_fitting_seq(self):
    from lenstronomy.Workflow.fitting_sequence import FittingSequence
    self.fitting_seq = FittingSequence(self.kwargs_data_joint, self.kwargs_model,
                                       self.kwargs_constraints,
                                       self.kwargs_likelihood, self.kwargs_params)
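# Hedged usage sketch (not from the original source): once build_fitting_seq()
# has run, a fit follows the same ['PSO', {...}] / best_fit() pattern used
# throughout this collection; particle and iteration counts are illustrative.
#
#   self.build_fitting_seq()
#   fitting_kwargs_list = [['PSO', {'sigma_scale': 1., 'n_particles': 50, 'n_iterations': 50}]]
#   chain_list = self.fitting_seq.fit_sequence(fitting_kwargs_list)
#   kwargs_result = self.fitting_seq.best_fit()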
def test_get_galaxy_galaxy_recipe(self):
    """
    Test `get_galaxy_galaxy_recipe` method.

    :return:
    :rtype:
    """
    image = np.random.normal(size=(120, 120))
    kwargs_data_joint = {
        'multi_band_list': [[{
            'image_data': image,
            'background_rms': 0.01,
            'exposure_time': np.ones_like(image),
            'ra_at_xy_0': 0.,
            'dec_at_xy_0': 0.,
            'transform_pix2angle': np.array([[-0.01, 0], [0, 0.01]])
        }, {}, {}]],
        'multi_band_type': 'multi-linear'
    }
    fitting_kwargs_list = self.recipe.get_galaxy_galaxy_recipe(kwargs_data_joint)
    assert isinstance(fitting_kwargs_list, list)

    # test the recipe by running it fully
    config = deepcopy(self.config)
    config.settings['model']['source_light'] = ['SHAPELETS']
    recipe = Recipe(config)
    fitting_sequence = FittingSequence(
        kwargs_data_joint,
        config.get_kwargs_model(),
        config.get_kwargs_constraints(),
        config.get_kwargs_likelihood(),
        config.get_kwargs_params(),
    )
    fitting_kwargs_list = recipe.get_recipe(
        kwargs_data_joint=kwargs_data_joint, recipe_name='galaxy-galaxy')
    fitting_sequence.fit_sequence(fitting_kwargs_list)
def test_fitting_sequence(self):
    # kwargs_init = [self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps]
    lens_sigma = [{'theta_E': 0.1, 'gamma': 0.1, 'e1': 0.1, 'e2': 0.1,
                   'center_x': 0.1, 'center_y': 0.1},
                  {'e1': 0.1, 'e2': 0.1}]
    lens_lower = [{'theta_E': 0., 'gamma': 1.5, 'center_x': -2, 'center_y': -2,
                   'e1': -0.4, 'e2': -0.4},
                  {'e1': -0.3, 'e2': -0.3}]
    lens_upper = [{'theta_E': 10., 'gamma': 2.5, 'center_x': 2, 'center_y': 2,
                   'e1': 0.4, 'e2': 0.4},
                  {'e1': 0.3, 'e2': 0.3}]
    source_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1,
                     'center_y': 0.1, 'e1': 0.1, 'e2': 0.1}]
    source_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2,
                     'center_y': -2, 'e1': -0.4, 'e2': -0.4}]
    source_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2,
                     'center_y': 2, 'e1': 0.4, 'e2': 0.4}]
    lens_light_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1,
                         'center_y': 0.1}]
    lens_light_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2,
                         'center_y': -2}]
    lens_light_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2,
                         'center_y': 2}]
    ps_sigma = [{'ra_source': 1, 'dec_source': 1, 'point_amp': 1}]

    lens_param = self.kwargs_lens, lens_sigma, [{}, {}], lens_lower, lens_upper
    source_param = self.kwargs_source, source_sigma, [{}], source_lower, source_upper
    lens_light_param = self.kwargs_lens_light, lens_light_sigma, [{}], \
        lens_light_lower, lens_light_upper
    ps_param = self.kwargs_ps, ps_sigma, [{}], self.kwargs_ps, self.kwargs_ps

    kwargs_params = {'lens_model': lens_param,
                     'source_model': source_param,
                     'lens_light_model': lens_light_param,
                     'point_source_model': ps_param,
                     # 'cosmography': cosmo_param
                     }
    # kwargs_params = [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_init, kwargs_init]
    image_band = [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]
    multi_band_list = [image_band]
    fittingSequence = FittingSequence(multi_band_list, self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood, kwargs_params)
    lens_temp, source_temp, lens_light_temp, else_temp, cosmo_temp, chain_list, \
        param_list, samples_mcmc, param_mcmc, dist_mcmc = fittingSequence.fit_sequence(
            fitting_kwargs_list=[])
    npt.assert_almost_equal(lens_temp[0]['theta_E'],
                            self.kwargs_lens[0]['theta_E'], decimal=2)

    n_p = 2
    n_i = 2
    fitting_kwargs_list = [
        {'fitting_routine': 'PSO', 'sigma_scale': 1, 'n_particles': n_p,
         'n_iterations': n_i},
        {'fitting_routine': 'MCMC', 'sigma_scale': 0.1, 'n_burn': 1, 'n_run': 1,
         'walkerRatio': 2},
        {'fitting_routine': 'align_images', 'lower_limit_shift': -0.1,
         'upper_limit_shift': 0.1, 'n_particles': 2, 'n_iterations': 2},
        {'fitting_routine': 'psf_iteration', 'psf_iter_num': 2,
         'psf_iter_factor': 0.5, 'kwargs_psf_iter': {'stacking_option': 'mean'}}
    ]
    lens_temp, source_temp, lens_light_temp, else_temp, cosmo_temp, chain_list, \
        param_list, samples_mcmc, param_mcmc, dist_mcmc = fittingSequence.fit_sequence(
            fitting_kwargs_list=fitting_kwargs_list)
    npt.assert_almost_equal(lens_temp[0]['theta_E'],
                            self.kwargs_lens[0]['theta_E'], decimal=1)
def test_nautilus(self):
    kwargs_params = copy.deepcopy(self.kwargs_params)
    fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood, kwargs_params)
    fitting_list = []
    kwargs_nautilus = {
        'prior_type': 'uniform',
        'thread_count': 1,
        'verbose': True,
        'one_step': True,
        'n_live': 2,
        'random_state': 42
    }
    fitting_list.append(['Nautilus', kwargs_nautilus])
    chain_list = fittingSequence.fit_sequence(fitting_list)
def __init__(self, kwargs_data_joint, kwargs_model, lens_params, source_params,
             lenslight_params=None, kwargs_constraints=None, kwargs_likelihood=None):
    """
    Class to manage cluster source reconstruction. This class wraps the
    FittingSequence class in the Workflow module of lenstronomy.

    :param kwargs_data_joint: keyword arguments of [data, psf, numerics] in lenstronomy convention
    :param kwargs_model: name of model list
    :param lens_params: lens model keyword arguments
        [kwargs_lens_init, kwargs_lens_sigma, kwargs_fixed_lens, kwargs_lower_lens, kwargs_upper_lens]
    :param source_params: source model keyword arguments
        [kwargs_source_init, kwargs_source_sigma, kwargs_fixed_source, kwargs_lower_source, kwargs_upper_source]
    :param kwargs_constraints: constraints on models
    :param kwargs_likelihood: options for calculating the likelihood; see the
        LikelihoodModule class in the Sampling module of lenstronomy
    """
    self.kwargs_data_joint = kwargs_data_joint
    self.multi_band_list = kwargs_data_joint.get('multi_band_list', [])
    self.kwargs_model = kwargs_model
    kwargs_params = {'lens_model': lens_params,
                     'source_model': source_params,
                     'lens_light_model': lenslight_params}
    self.kwargs_params = kwargs_params
    if kwargs_constraints is None:
        kwargs_constraints = {}
    if kwargs_likelihood is None:
        kwargs_likelihood = {'source_marg': False, 'check_positive_flux': True}
    self.fitting_seq_src = FittingSequence(kwargs_data_joint, kwargs_model,
                                           kwargs_constraints, kwargs_likelihood,
                                           kwargs_params)
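# Hedged usage sketch (not from the original source): `ClusterReconstruct` is a
# placeholder name for the class defined above; each *_params list follows the
# [init, sigma, fixed, lower, upper] convention used throughout this collection.
#
#   recon = ClusterReconstruct(kwargs_data_joint, kwargs_model,
#                              lens_params, source_params,
#                              lenslight_params=lenslight_params)
#   recon.fitting_seq_src.fit_sequence(
#       [['PSO', {'sigma_scale': 1., 'n_particles': 50, 'n_iterations': 50}]])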
def test_dynesty(self):
    kwargs_params = copy.deepcopy(self.kwargs_params)
    kwargs_params['lens_model'][0][0]['theta_E'] += 0.01
    fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood, kwargs_params)
    fitting_list = []
    kwargs_dynesty = {
        'sampler_type': 'DYNESTY',
        'kwargs_run': {
            'dlogz_init': 0.01,
            'nlive_init': 6,
            'nlive_batch': 6,
            'maxbatch': 1,
        },
    }
    fitting_list.append(['nested_sampling', kwargs_dynesty])
    chain_list = fittingSequence.fit_sequence(fitting_list)
def test_multinest(self):
    # Nested sampler tests
    # further decrease the parameter space for nested samplers to run faster
    fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood, self.kwargs_params)
    fitting_list = []
    kwargs_update = {
        'ps_add_fixed': [[0, ['ra_source', 'dec_source'], [0, 0]]],
        'lens_light_add_fixed': [[0, ['n_sersic', 'R_sersic', 'center_x', 'center_y'],
                                  [4, .1, 0, 0]]],
        'source_add_fixed': [[0, ['R_sersic', 'e1', 'e2', 'center_x', 'center_y'],
                              [.6, .1, .1, 0, 0]]],
        'lens_add_fixed': [[0, ['gamma', 'theta_E', 'e1', 'e2', 'center_x', 'center_y'],
                            [1.8, 1., .1, .1, 0, 0]],
                           [1, ['gamma1', 'gamma2'], [0.01, 0.01]]],
        'change_source_lower_limit': [[0, ['n_sersic'], [2.9]]],
        'change_source_upper_limit': [[0, ['n_sersic'], [3.1]]]
    }
    fitting_list.append(['update_settings', kwargs_update])
    kwargs_multinest = {
        'sampler_type': 'MULTINEST',
        'kwargs_run': {
            'n_live_points': 10,
            'evidence_tolerance': 0.5,
            'sampling_efficiency': 0.8,  # 1 for posterior-only, 0 for evidence-only
            'importance_nested_sampling': False,
            'multimodal': True,
            'const_efficiency_mode': False,  # reduce sampling_efficiency to 5% when True
        },
        'remove_output_dir': True,
    }
    fitting_list.append(['nested_sampling', kwargs_multinest])
    chain_list2 = fittingSequence.fit_sequence(fitting_list)
    kwargs_fixed = fittingSequence._updateManager.fixed_kwargs
    npt.assert_almost_equal(kwargs_fixed[0][1]['gamma1'], 0.01, decimal=2)
    assert fittingSequence._updateManager._lower_kwargs[1][0]['n_sersic'] == 2.9
    assert fittingSequence._updateManager._upper_kwargs[1][0]['n_sersic'] == 3.1

    kwargs_test = {'kwargs_lens': 1}
    fittingSequence.update_state(kwargs_test)
    kwargs_out = fittingSequence.best_fit(bijective=True)
    assert kwargs_out['kwargs_lens'] == 1
def test_zeus(self):
    # we make a very basic lens+source model to check that zeus can be run
    # through the fitting sequence. We don't use the kwargs defined in setup()
    # as those are modified during the tests; using unique kwargs here is safer.

    # data specifics
    sigma_bkg = 0.05  # background noise per pixel
    exp_time = 100  # exposure time (arbitrary units, flux per pixel is in units photons/exp_time unit)
    numPix = 10  # cutout pixel size
    deltaPix = 0.05  # pixel size in arcsec (area per pixel = deltaPix**2)
    fwhm = 0.5  # full width half max of PSF

    # PSF specification
    kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg)
    data_class = ImageData(**kwargs_data)
    kwargs_psf_gaussian = {
        'psf_type': 'GAUSSIAN',
        'fwhm': fwhm,
        'pixel_size': deltaPix,
        'truncation': 3
    }
    psf_gaussian = PSF(**kwargs_psf_gaussian)
    kwargs_psf = {
        'psf_type': 'PIXEL',
        'kernel_point_source': psf_gaussian.kernel_point_source,
        'psf_error_map': np.zeros_like(psf_gaussian.kernel_point_source)
    }
    psf_class = PSF(**kwargs_psf)

    # make a lens
    lens_model_list = ['EPL']
    kwargs_epl = {'theta_E': 0.6, 'gamma': 2.6, 'center_x': 0.0, 'center_y': 0.0,
                  'e1': 0.1, 'e2': 0.1}
    kwargs_lens = [kwargs_epl]
    lens_model_class = LensModel(lens_model_list=lens_model_list)

    # make a source
    source_model_list = ['SERSIC_ELLIPSE']
    kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': 0.6, 'n_sersic': 3,
                             'center_x': 0.0, 'center_y': 0.0, 'e1': 0.1, 'e2': 0.1}
    kwargs_source = [kwargs_sersic_ellipse]
    source_model_class = LightModel(light_model_list=source_model_list)

    kwargs_numerics = {'supersampling_factor': 1,
                       'supersampling_convolution': False}
    imageModel = ImageModel(data_class, psf_class, lens_model_class,
                            source_model_class, kwargs_numerics=kwargs_numerics)
    image_sim = sim_util.simulate_simple(imageModel, kwargs_lens, kwargs_source)
    data_class.update_data(image_sim)
    kwargs_data['image_data'] = image_sim
    kwargs_model = {'lens_model_list': lens_model_list,
                    'source_light_model_list': source_model_list}

    lens_fixed = [{}]
    lens_sigma = [{'theta_E': 0.1, 'gamma': 0.1, 'e1': 0.1, 'e2': 0.1,
                   'center_x': 0.1, 'center_y': 0.1}]
    lens_lower = [{'theta_E': 0., 'gamma': 1.5, 'center_x': -2, 'center_y': -2,
                   'e1': -0.4, 'e2': -0.4}]
    lens_upper = [{'theta_E': 10., 'gamma': 2.5, 'center_x': 2, 'center_y': 2,
                   'e1': 0.4, 'e2': 0.4}]

    source_fixed = [{}]
    source_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1,
                     'center_y': 0.1, 'e1': 0.1, 'e2': 0.1}]
    source_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2,
                     'center_y': -2, 'e1': -0.4, 'e2': -0.4}]
    source_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2,
                     'center_y': 2, 'e1': 0.4, 'e2': 0.4}]

    lens_param = [kwargs_lens, lens_sigma, lens_fixed, lens_lower, lens_upper]
    source_param = [kwargs_source, source_sigma, source_fixed, source_lower,
                    source_upper]
    kwargs_params = {'lens_model': lens_param, 'source_model': source_param}
    kwargs_constraints = {}
    multi_band_list = [[kwargs_data, kwargs_psf, kwargs_numerics]]
    kwargs_data_joint = {'multi_band_list': multi_band_list,
                         'multi_band_type': 'multi-linear'}
    kwargs_likelihood = {'source_marg': True}

    fittingSequence = FittingSequence(kwargs_data_joint, kwargs_model,
                                      kwargs_constraints, kwargs_likelihood,
                                      kwargs_params)
    fitting_list = []
    kwargs_zeus = {'sampler_type': 'ZEUS', 'n_burn': 2, 'n_run': 2,
                   'walkerRatio': 4}
    fitting_list.append(['MCMC', kwargs_zeus])
    chain_list = fittingSequence.fit_sequence(fitting_list)
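# Hedged follow-up sketch (not part of the original test): each entry of the
# returned chain_list can be unpacked with the same convention used for MCMC
# chains later in this collection.
#
#   sampler_type, samples, param_names, _ = chain_list[0]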
kwargs_data_joint = {
    'time_delays_measured': d_dt,
    'time_delays_uncertainties': d_dt_sigma,
    'flux_ratios': flux_ratios,
    'flux_ratio_errors': flux_ratio_errors
}
kwargs_model = {
    'lens_model_list': lens_model_list,
    'point_source_model_list': point_source_list
}
mpi = False  # MPI possible, but not supported through this notebook.

from lenstronomy.Workflow.fitting_sequence import FittingSequence

fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints,
                              kwargs_likelihood, kwargs_params)
fitting_kwargs_list = [
    # ['update_settings', {'lens_add_fixed': [[0, ['gamma']]]}],
    ['PSO', {'sigma_scale': 1., 'n_particles': 100, 'n_iterations': 100}],
]
start_time = time.time()
chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(
    fitting_kwargs_list)
lens_result, source_result, lens_light_result, ps_result, cosmo_result = \
    fitting_seq.best_fit()
end_time = time.time()
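# Hedged follow-up (not in the original snippet): report the wall-clock time of
# the PSO run using the timestamps recorded above.
#
#   print(end_time - start_time, 'total time needed for computation')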
def make_lensmodel(lens_info, theta_E, source_info, box_f):
    # lens data specifics
    lens_image = lens_info['image']
    psf_lens = lens_info['psf']
    background_rms = background_rms_image(5, lens_image)
    exposure_time = 100
    kwargs_data_lens = sim_util.data_configure_simple(len(lens_image),
                                                      lens_info['deltapix'],
                                                      exposure_time,
                                                      background_rms)
    kwargs_data_lens['image_data'] = lens_image
    data_class_lens = ImageData(**kwargs_data_lens)

    # PSF
    kwargs_psf_lens = {'psf_type': 'PIXEL', 'pixel_size': lens_info['deltapix'],
                       'kernel_point_source': psf_lens}
    psf_class_lens = PSF(**kwargs_psf_lens)

    # lens light model
    lens_light_model_list = ['SERSIC_ELLIPSE']
    lens_light_model_class = LightModel(light_model_list=lens_light_model_list)
    kwargs_model = {'lens_light_model_list': lens_light_model_list}
    kwargs_numerics_galfit = {'supersampling_factor': 1}
    kwargs_constraints = {}
    kwargs_likelihood = {'check_bounds': True}
    image_band = [kwargs_data_lens, kwargs_psf_lens, kwargs_numerics_galfit]
    multi_band_list = [image_band]
    kwargs_data_joint = {'multi_band_list': multi_band_list,
                         'multi_band_type': 'multi-linear'}

    # Sersic component
    fixed_lens_light = [{}]
    kwargs_lens_light_init = [{'R_sersic': .1, 'n_sersic': 4, 'e1': 0, 'e2': 0,
                               'center_x': 0, 'center_y': 0}]
    kwargs_lens_light_sigma = [{'n_sersic': 0.5, 'R_sersic': 0.2, 'e1': 0.1,
                                'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1}]
    kwargs_lower_lens_light = [{'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.01,
                                'n_sersic': 0.5, 'center_x': -10, 'center_y': -10}]
    kwargs_upper_lens_light = [{'e1': 0.5, 'e2': 0.5, 'R_sersic': 10,
                                'n_sersic': 8, 'center_x': 10, 'center_y': 10}]
    lens_light_params = [kwargs_lens_light_init, kwargs_lens_light_sigma,
                         fixed_lens_light, kwargs_lower_lens_light,
                         kwargs_upper_lens_light]
    kwargs_params = {'lens_light_model': lens_light_params}

    fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model,
                                  kwargs_constraints, kwargs_likelihood,
                                  kwargs_params)
    fitting_kwargs_list = [['PSO', {'sigma_scale': 1., 'n_particles': 50,
                                    'n_iterations': 50}]]
    chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)
    kwargs_result = fitting_seq.best_fit()
    modelPlot = ModelPlot(multi_band_list, kwargs_model, kwargs_result)

    # Lens light best result
    kwargs_light_lens = kwargs_result['kwargs_lens_light'][0]

    # Lens model
    kwargs_lens_list = [{'theta_E': theta_E,
                         'e1': kwargs_light_lens['e1'],
                         'e2': kwargs_light_lens['e2'],
                         'center_x': kwargs_light_lens['center_x'],
                         'center_y': kwargs_light_lens['center_y']}]
    lensModel = LensModel(['SIE'])
    lme = LensModelExtensions(lensModel)

    # random position for the source
    x_crit_list, y_crit_list = lme.critical_curve_tiling(
        kwargs_lens_list,
        compute_window=len(source_info['image']) * source_info['deltapix'],
        start_scale=source_info['deltapix'],
        max_order=10)
    if len(x_crit_list) > 2 and len(y_crit_list) > 2:
        x_caustic_list, y_caustic_list = lensModel.ray_shooting(
            x_crit_list, y_crit_list, kwargs_lens_list)
        xsamp0 = np.arange(min(x_caustic_list) - min(x_caustic_list) * box_f[0],
                           max(x_caustic_list) + max(x_caustic_list) * box_f[1],
                           0.1)
        xsamp = xsamp0[abs(xsamp0.round(1)) != 0.1]
        ysamp0 = np.arange(min(y_caustic_list) - min(y_caustic_list) * box_f[0],
                           max(y_caustic_list) + max(y_caustic_list) * box_f[1],
                           0.1)
        ysamp = ysamp0[abs(ysamp0.round(1)) != 0.1]
        if len(xsamp) == 0 or len(ysamp) == 0:
            x_shift, y_shift = 0.15, 0.15  # arcseconds
        else:
            y_shift = rand.sample(list(ysamp), 1)[0]
            x_shift = rand.sample(list(xsamp), 1)[0]
    else:
        x_shift, y_shift = -0.15, 0.15  # arcseconds
        x_caustic_list = [0]
        y_caustic_list = [0]
    solver = LensEquationSolver(lensModel)
    theta_ra, theta_dec = solver.image_position_from_source(
        x_shift, y_shift, kwargs_lens_list)
    if len(theta_ra) <= 1:
        x_shift, y_shift = -0.2, -0.2  # arcseconds
    if abs(x_shift) >= int(theta_E) or abs(y_shift) >= int(theta_E):
        x_shift, y_shift = 0.3, -0.3
        print('BLABLA')
    print('HERE',
          min(x_caustic_list) - min(x_caustic_list) * box_f[0],
          max(x_caustic_list) + max(x_caustic_list) * box_f[1],
          min(y_caustic_list) - min(y_caustic_list) * box_f[0],
          max(y_caustic_list) + max(y_caustic_list) * box_f[1])
    return {'lens_light_model_list': ['SERSIC_ELLIPSE'],
            'kwargs_light_lens': [kwargs_light_lens],
            'lens_light_model_class': lens_light_model_class,
            'kwargs_lens_list': kwargs_lens_list,
            'kwargs_data_lens': kwargs_data_lens,
            'source_shift': [x_shift, y_shift]}
def test_fitting_sequence(self):
    fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood, self.kwargs_params)
    kwargs_result = fittingSequence.best_fit(bijective=False)
    lens_temp = kwargs_result['kwargs_lens']
    npt.assert_almost_equal(lens_temp[0]['theta_E'],
                            self.kwargs_lens[0]['theta_E'], decimal=2)

    logL = fittingSequence.best_fit_likelihood
    print(logL, 'test')
    # print(lens_temp, source_temp, lens_light_temp, ps_temp, cosmo_temp)
    assert logL < 0
    bic = fittingSequence.bic
    assert bic > 0
    # npt.assert_almost_equal(bic, 20000000220.29376, decimal=-4)
    # npt.assert_almost_equal(logL, -10000000061.792593, decimal=-4)

    n_p = 2
    n_i = 2
    fitting_list = []
    kwargs_pso = {'sigma_scale': 1, 'n_particles': n_p, 'n_iterations': n_i}
    fitting_list.append(['PSO', kwargs_pso])
    kwargs_align = {'lowerLimit': -0.1, 'upperLimit': 0.1, 'n_particles': 2,
                    'n_iterations': 2}
    fitting_list.append(['align_images', kwargs_align])
    kwargs_psf_iter = {'num_iter': 2, 'psf_iter_factor': 0.5,
                       'stacking_method': 'mean', 'new_procedure': False}
    fitting_list.append(['psf_iteration', kwargs_psf_iter])
    fitting_list.append(['restart', None])
    fitting_list.append(['fix_not_computed', {'free_bands': [True]}])
    n_sersic_overwrite = 4
    kwargs_update = {
        'lens_light_add_fixed': [[0, ['n_sersic'], [n_sersic_overwrite]]],
        'lens_light_remove_fixed': [[0, ['center_x']]],
        'change_source_lower_limit': [[0, ['n_sersic'], [0.1]]],
        'change_source_upper_limit': [[0, ['n_sersic'], [10]]]
    }
    fitting_list.append(['update_settings', kwargs_update])

    chain_list = fittingSequence.fit_sequence(fitting_list)
    lens_fixed, source_fixed, lens_light_fixed, ps_fixed, special_fixed, \
        extinction_fixed = fittingSequence._updateManager.fixed_kwargs
    kwargs_result = fittingSequence.best_fit(bijective=False)
    npt.assert_almost_equal(kwargs_result['kwargs_lens'][0]['theta_E'],
                            self.kwargs_lens[0]['theta_E'], decimal=1)
    npt.assert_almost_equal(
        fittingSequence._updateManager._lens_light_fixed[0]['n_sersic'],
        n_sersic_overwrite, decimal=8)
    npt.assert_almost_equal(lens_light_fixed[0]['n_sersic'], 4, decimal=-1)
    assert fittingSequence._updateManager._lower_kwargs[1][0]['n_sersic'] == 0.1
    assert fittingSequence._updateManager._upper_kwargs[1][0]['n_sersic'] == 10

    # test 'set_param_value' fitting sequence
    fitting_list = [
        ['set_param_value', {'lens': [[1, ['gamma1'], [0.013]]]}],
        ['set_param_value', {'lens_light': [[0, ['center_x'], [0.009]]]}],
        ['set_param_value', {'source': [[0, ['n_sersic'], [2.993]]]}],
        ['set_param_value', {'ps': [[0, ['ra_source'], [0.007]]]}]
    ]
    fittingSequence.fit_sequence(fitting_list)
    kwargs_set = fittingSequence._updateManager.parameter_state
    assert kwargs_set['kwargs_lens'][1]['gamma1'] == 0.013
    assert kwargs_set['kwargs_lens_light'][0]['center_x'] == 0.009
    assert kwargs_set['kwargs_source'][0]['n_sersic'] == 2.993
    assert kwargs_set['kwargs_ps'][0]['ra_source'] == 0.007
def sl_sys_analysis():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        print(":Registered %d processes" % comm_size)
        args["infile"] = sys.argv[1]
        args["nimgs"] = sys.argv[2]
        args["los"] = sys.argv[3]
        args["version"] = sys.argv[4]
        args["dt_sigma"] = float(sys.argv[5])
        args["image_amps_sigma"] = float(sys.argv[6])
        args["flux_ratio_errors"] = float(sys.argv[7])
        args["astrometry_sigma"] = float(sys.argv[8])
    args = comm.bcast(args)

    # Organize division of strong lensing systems
    with open(args["infile"], "r") as myfile:
        limg_data = myfile.read()
    systems = json.loads(limg_data)
    sys_nr_per_proc = int(len(systems) / comm_size)
    print("comm_rank", comm_rank)
    start_sys = sys_nr_per_proc * comm_rank
    end_sys = sys_nr_per_proc * (comm_rank + 1)
    print(start_sys, end_sys)
    with open("../lens_catalogs_sie_only.json", "r") as myfile:
        limg_data = myfile.read()
    systems_prior = json.loads(limg_data)

    if comm_rank == 0:
        print("Each process will have %d systems" % sys_nr_per_proc)
        print("That should take approx. %f min." % (sys_nr_per_proc * 20))

    source_size_pc = 10.0
    window_size = 0.1  # units of arcseconds
    grid_number = 100  # supersampled window (per axis)
    z_source = 2.0
    cosmo = FlatLambdaCDM(H0=71, Om0=0.3089, Ob0=0.0)

    results = {"gamma": [], "phi_ext": [], "gamma_ext": [], "theta_E": [], "D_dt": []}
    for ii in range(len(systems))[(start_sys + 2):end_sys]:
        system = systems[ii]
        system_prior = systems_prior[ii]
        print("Analysing system ID: %d" % ii)

        # the data set is
        z_lens = system_prior["zl"]
        lensCosmo = LensCosmo(cosmo=cosmo, z_lens=z_lens, z_source=z_source)
        # convert units of pc into arcseconds
        D_s = lensCosmo.D_s
        source_size_arcsec = source_size_pc / 10 ** 6 / D_s / constants.arcsec
        print("The source size in arcsec init = %.4f" % source_size_arcsec)  # 0.0012

        # multiple image properties
        ximg = np.zeros(system["nimgs"])
        yimg = np.zeros(system["nimgs"])
        t_days = np.zeros(system["nimgs"])
        image_amps = np.zeros(system["nimgs"])
        for jj in range(system["nimgs"]):
            ximg[jj] = system["ximg"][jj]  # [arcsec]
            yimg[jj] = system["yimg"][jj]  # [arcsec]
            t_days[jj] = system["delay"][jj]  # [days]
            image_amps[jj] = system["mags"][jj]  # [linear units or magnitudes]
        # sort by arrival time
        index_sort = np.argsort(t_days)
        ximg = ximg[index_sort]  # relative RA (arc seconds)
        yimg = yimg[index_sort]  # relative DEC (arc seconds)
        image_amps = np.abs(image_amps[index_sort])
        t_days = t_days[index_sort]
        d_dt = t_days[1:] - t_days[0]

        # measurement uncertainties
        astrometry_sigma = args["astrometry_sigma"]
        ximg_measured = ximg + np.random.normal(0, astrometry_sigma, system["nimgs"])
        yimg_measured = yimg + np.random.normal(0, astrometry_sigma, system["nimgs"])
        image_amps_sigma = np.ones(system["nimgs"]) * args["image_amps_sigma"]
        flux_ratios = image_amps[1:] - image_amps[0]
        flux_ratio_errors = np.ones(system["nimgs"] - 1) * args["flux_ratio_errors"]
        flux_ratios_measured = flux_ratios + np.random.normal(0, flux_ratio_errors)
        d_dt_sigma = np.ones(system["nimgs"] - 1) * args["dt_sigma"]
        d_dt_measured = d_dt + np.random.normal(0, d_dt_sigma)

        kwargs_data_joint = {
            "time_delays_measured": d_dt_measured,
            "time_delays_uncertainties": d_dt_sigma,
            "flux_ratios": flux_ratios_measured,
            "flux_ratio_errors": flux_ratio_errors,
            "ra_image_list": [ximg_measured],
            "dec_image_list": [yimg_measured],
        }

        # lens model choices; a third profile entry is needed to match the
        # external convergence parameters set up below
        lens_model_list = ["SPEMD", "SHEAR_GAMMA_PSI", "CONVERGENCE"]

        # 1st layer: primary lens (SPEMD)
        fixed_lens = []
        kwargs_lens_init = []
        kwargs_lens_sigma = []
        kwargs_lower_lens = []
        kwargs_upper_lens = []
        fixed_lens.append({})
        kwargs_lens_init.append({"theta_E": 1.0, "gamma": 2, "center_x": 0,
                                 "center_y": 0, "e1": 0, "e2": 0.0})
        # error
        kwargs_lens_sigma.append({"theta_E": 0.2, "e1": 0.1, "e2": 0.1,
                                  "gamma": 0.1, "center_x": 0.1, "center_y": 0.1})
        # lower limit
        kwargs_lower_lens.append({"theta_E": 0.01, "e1": -0.5, "e2": -0.5,
                                  "gamma": 1.5, "center_x": -10, "center_y": -10})
        # upper limit
        kwargs_upper_lens.append({"theta_E": 10, "e1": 0.5, "e2": 0.5,
                                  "gamma": 2.5, "center_x": 10, "center_y": 10})
        # 2nd layer: external SHEAR
        fixed_lens.append({"ra_0": 0, "dec_0": 0})
        kwargs_lens_init.append({"gamma_ext": 0.05, "psi_ext": 0.0})
        kwargs_lens_sigma.append({"gamma_ext": 0.05, "psi_ext": np.pi})
        kwargs_lower_lens.append({"gamma_ext": 0, "psi_ext": -np.pi})
        kwargs_upper_lens.append({"gamma_ext": 0.3, "psi_ext": np.pi})
        # 3rd layer: external CONVERGENCE
        fixed_lens.append({})
        kwargs_lens_init.append({'kappa_ext': 0.12})
        kwargs_lens_sigma.append({'kappa_ext': 0.06})
        kwargs_lower_lens.append({'kappa_ext': 0.0})
        kwargs_upper_lens.append({'kappa_ext': 0.3})
        # combined lens model
        lens_params = [kwargs_lens_init, kwargs_lens_sigma, fixed_lens,
                       kwargs_lower_lens, kwargs_upper_lens]

        # image position parameters
        point_source_list = ["LENSED_POSITION"]
        # we fix the image position coordinates
        fixed_ps = [{}]
        # the initial guess for the appearing image positions is:
        # at the image position
        kwargs_ps_init = [{"ra_image": ximg, "dec_image": yimg}]
        # let some freedom in how well the actual image positions are
        # matching those given by the data (indicated as 'ra_image', 'dec_image'
        # and held fixed while fitting)
        kwargs_ps_sigma = [{"ra_image": 0.01 * np.ones(len(ximg)),
                            "dec_image": 0.01 * np.ones(len(ximg))}]
        kwargs_lower_ps = [{"ra_image": -10 * np.ones(len(ximg)),
                            "dec_image": -10 * np.ones(len(ximg))}]
        kwargs_upper_ps = [{"ra_image": 10 * np.ones(len(ximg)),
                            "dec_image": 10 * np.ones(len(ximg))}]
        ps_params = [kwargs_ps_init, kwargs_ps_sigma, fixed_ps,
                     kwargs_lower_ps, kwargs_upper_ps]

        # quasar source size
        fixed_special = {}
        kwargs_special_init = {}
        kwargs_special_sigma = {}
        kwargs_lower_special = {}
        kwargs_upper_special = {}
        fixed_special["source_size"] = source_size_arcsec
        kwargs_special_init["source_size"] = source_size_arcsec
        kwargs_special_sigma["source_size"] = source_size_arcsec
        kwargs_lower_special["source_size"] = 0.0001
        kwargs_upper_special["source_size"] = 1

        # Time-delay distance
        kwargs_special_init["D_dt"] = 4300  # corresponds to H0 ~ 70
        kwargs_special_sigma["D_dt"] = 3000
        kwargs_lower_special["D_dt"] = 2500  # corresponds to H0 ~ 120
        kwargs_upper_special["D_dt"] = 14000  # corresponds to H0 ~ 20
        special_params = [kwargs_special_init, kwargs_special_sigma,
                          fixed_special, kwargs_lower_special,
                          kwargs_upper_special]

        # combined parameter settings
        kwargs_params = {"lens_model": lens_params,
                         "point_source_model": ps_params,
                         "special": special_params}

        # our model choices
        kwargs_model = {"lens_model_list": lens_model_list,
                        "point_source_model_list": point_source_list}
        lensModel = LensModel(kwargs_model["lens_model_list"])
        lensModelExtensions = LensModelExtensions(lensModel=lensModel)
        lensEquationSolver = LensEquationSolver(lensModel=lensModel)

        # setup options for likelihood and parameter sampling
        time_delay_likelihood = True
        flux_ratio_likelihood = True
        image_position_likelihood = True
        kwargs_flux_compute = {"source_type": "INF",
                               "window_size": window_size,
                               "grid_number": grid_number}

        kwargs_constraints = {
            # any proposed lens model must satisfy the image positions
            # appearing at the position of the point sources being sampled
            "num_point_source_list": [int(args["nimgs"])],
            # "solver_type": "PROFILE_SHEAR",
            "Ddt_sampling": time_delay_likelihood,  # sampling of the time-delay distance
            # explicit modelling of the astrometric imperfection of
            # the point source positions
            "point_source_offset": True,
        }
        # explicit sampling of finite source size parameter
        # (only use when source_type='GAUSSIAN' or 'TORUS')
        if (kwargs_flux_compute["source_type"] in ["GAUSSIAN", "TORUS"]
                and flux_ratio_likelihood is True):
            kwargs_constraints["source_size"] = True

        # e.g. power-law mass slope of the main deflector
        # [[index_model, 'param_name', mean, 1-sigma error], [...], ...]
        prior_lens = [[0, "gamma", 2, 0.1]]
        prior_special = []

        kwargs_likelihood = {
            "position_uncertainty": args["astrometry_sigma"],
            "source_position_likelihood": True,
            "image_position_likelihood": True,
            "time_delay_likelihood": True,
            "flux_ratio_likelihood": True,
            "kwargs_flux_compute": kwargs_flux_compute,
            "prior_lens": prior_lens,
            "prior_special": prior_special,
            "check_solver": True,
            "solver_tolerance": 0.001,
            "check_bounds": True,
        }

        fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model,
                                      kwargs_constraints, kwargs_likelihood,
                                      kwargs_params)
        fitting_kwargs_list = [
            ["PSO", {"sigma_scale": 1.0, "n_particles": 200, "n_iterations": 500}]
        ]
        chain_list_pso = fitting_seq.fit_sequence(fitting_kwargs_list)
        kwargs_result = fitting_seq.best_fit(bijective=True)
        args_result = fitting_seq.param_class.kwargs2args(**kwargs_result)
        logL, _ = fitting_seq.likelihoodModule.logL(args_result, verbose=True)

        # and now we run the MCMC
        fitting_kwargs_list = [
            ["MCMC", {"n_burn": 400, "n_run": 600, "walkerRatio": 10,
                      "sigma_scale": 0.1}]
        ]
        chain_list_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)
        kwargs_result = fitting_seq.best_fit()
        # unpack the MCMC chain (same convention as elsewhere in this repository)
        sampler_type, samples_mcmc, param_mcmc, _ = chain_list_mcmc[0]

        # print("number of non-linear parameters in the MCMC process: ", len(param_mcmc))
        # print("parameters in order: ", param_mcmc)
        print("number of evaluations in the MCMC process: ", np.shape(samples_mcmc)[0])
        param = Param(kwargs_model, fixed_lens, kwargs_fixed_ps=fixed_ps,
                      kwargs_fixed_special=fixed_special,
                      kwargs_lens_init=kwargs_result["kwargs_lens"],
                      **kwargs_constraints)
        # the number of non-linear parameters and their names
        # num_param, param_list = param.num_param()
        for i in range(len(samples_mcmc)):
            kwargs_out = param.args2kwargs(samples_mcmc[i])
            kwargs_lens_out, kwargs_special_out, kwargs_ps_out = (
                kwargs_out["kwargs_lens"],
                kwargs_out["kwargs_special"],
                kwargs_out["kwargs_ps"],
            )
            # compute 'real' image position adding potential astrometric shifts
            x_pos = kwargs_ps_out[0]["ra_image"]
            y_pos = kwargs_ps_out[0]["dec_image"]
            # extract quantities of the main deflector
            theta_E = kwargs_lens_out[0]["theta_E"]
            gamma = kwargs_lens_out[0]["gamma"]
            e1, e2 = kwargs_lens_out[0]["e1"], kwargs_lens_out[0]["e2"]
            phi, q = param_util.ellipticity2phi_q(e1, e2)
            phi_ext, gamma_ext = (kwargs_lens_out[1]["psi_ext"] % np.pi,
                                  kwargs_lens_out[1]["gamma_ext"])
            if flux_ratio_likelihood is True:
                mag = lensModel.magnification(x_pos, y_pos, kwargs_lens_out)
                flux_ratio_fit = mag[1:] / mag[0]
            if (kwargs_constraints.get("source_size", False) is True
                    and "source_size" not in fixed_special):
                source_size = kwargs_special_out["source_size"]
            if time_delay_likelihood is True:
                D_dt = kwargs_special_out["D_dt"]
        # and here the predicted angular diameter distance from a
        # default cosmology (attention for experimenter bias!)
        gamma = np.median(gamma)
        phi_ext = np.median(phi_ext)
        gamma_ext = np.median(gamma_ext)
        theta_E = np.median(theta_E)
        D_dt = np.median(D_dt)
        results["gamma"].append(gamma)
        results["phi_ext"].append(phi_ext)
        results["gamma_ext"].append(gamma_ext)
        results["theta_E"].append(theta_E)
        results["D_dt"].append(D_dt)
    'force_no_add_image': False,
    'source_marg': False,
    'image_likelihood': True,
    'point_source_likelihood': False,
    'position_uncertainty': 0.004,
    'check_solver': True,
    'solver_tolerance': 0.001,
    # 'check_positive_flux': True,
}
image_band = [kwargs_data, kwargs_psf, kwargs_numerics]
multi_band_list = [image_band]
fittingSeq = FittingSequence(multi_band_list, kwargs_model, kwargs_constraints,
                             kwargs_likelihood, kwargs_params)
kwargs_pso = {
    'fitting_routine': 'PSO',
    'mpi': False,
    'n_particles': 50,
    'n_iterations': 100,
    'sigma_scale': 1,
}
fitting_kwargs_list = [kwargs_pso]
lens_result, source_light_result, lens_light_result, ps_result, \
    cosmo_result, chain_list, param_list, \
kwargs_likelihood = {
    'source_position_likelihood': True,  # evaluates how close the different image positions match the source positions
    'image_position_likelihood': True,  # evaluate point source likelihood given the measured image positions
    'time_delay_likelihood': time_delay_likelihood,  # evaluating the time-delay likelihood
    # 'prior_lens': prior_lens,
    # 'prior_special': prior_special,
    'check_solver': True,  # check non-linear solver and discard non-solutions
    'solver_tolerance': 0.001,
    'check_bounds': True,  # check parameter bounds and punish them
}

from lenstronomy.Workflow.fitting_sequence import FittingSequence

fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints,
                              kwargs_likelihood, kwargs_params)

if fix_gamma == True:
    result = pickle.load(
        open(folder + 'sampler_results_SIE#{0}.pkl'.format(seed - 210), 'rb'))
elif fix_gamma == False:
    result = pickle.load(
        open(folder + 'sampler_results_SPEMD#{0}.pkl'.format(seed - 210), 'rb'))
fit_result, trans_result = result
kwargs_result, chain_list_mcmc, chain_list_pso = fit_result
args_result = fitting_seq.param_class.kwargs2args(**kwargs_result)
mcmc_new_list, labels_new = trans_result
logL = fitting_seq.likelihoodModule.logL(args_result, verbose=True)
def sl_sys_analysis():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        print(":Registered %d processes" % comm_size)
        args["infile"] = sys.argv[1]
        args["nimgs"] = sys.argv[2]
        args["los"] = sys.argv[3]
        args["version"] = sys.argv[4]
        args["dt_sigma"] = float(sys.argv[5])
        args["image_amps_sigma"] = float(sys.argv[6])
        args["flux_ratio_errors"] = float(sys.argv[7])
        args["astrometry_sigma"] = float(sys.argv[8])
    args = comm.bcast(args)

    # Organize division of strong lensing systems
    with open(args["infile"], "r") as myfile:
        limg_data = myfile.read()
    systems = json.loads(limg_data)
    sys_nr_per_proc = int(len(systems) / comm_size)
    print('comm_rank', comm_rank)
    start_sys = sys_nr_per_proc * comm_rank
    end_sys = sys_nr_per_proc * (comm_rank + 1)
    print(start_sys, end_sys)
    with open("../lens_catalogs_sie_only.json", "r") as myfile:
        limg_data = myfile.read()
    systems_prior = json.loads(limg_data)

    if comm_rank == 0:
        print("Each process will have %d systems" % sys_nr_per_proc)
        print("That should take approx. %f min." % (sys_nr_per_proc * 20))

    results = {"gamma": [], "phi_ext": [], "gamma_ext": [], "theta_E": [],
               "D_dt": []}
    for ii in range(len(systems))[(start_sys + 2):end_sys]:
        system = systems[ii]
        system_prior = systems_prior[ii]
        print("Analysing system ID: %d" % ii)
        print(system)

        # the data set is
        z_lens = system_prior["zl"]
        z_source = 2.0

        # multiple image properties
        ximg = np.zeros(system["nimgs"])
        yimg = np.zeros(system["nimgs"])
        delay = np.zeros(system["nimgs"])
        image_amps = np.zeros(system["nimgs"])
        for jj in range(system["nimgs"]):
            ximg[jj] = system["ximg"][jj]  # [arcsec]
            yimg[jj] = system["yimg"][jj]  # [arcsec]
            delay[jj] = system["delay"][jj]  # [days]
            image_amps[jj] = system["mags"][jj]  # [linear units or magnitudes]
        # sort by arrival time
        index_sort = np.argsort(delay)
        ximg = ximg[index_sort]
        yimg = yimg[index_sort]
        delay = delay[index_sort]
        image_amps = image_amps[index_sort]
        d_dt = delay[1:] - delay[0]
        # measurement uncertainties
        d_dt_sigma = np.ones(system["nimgs"] - 1) * args["dt_sigma"]
        image_amps_sigma = np.ones(system["nimgs"]) * args["image_amps_sigma"]
        flux_ratios = image_amps[1:] - image_amps[0]
        flux_ratio_errors = np.ones(system["nimgs"] - 1) * args["flux_ratio_errors"]

        # lens model choices
        lens_model_list = ["SPEP", "SHEAR"]
        # first choice: SPEP
        fixed_lens = []
        kwargs_lens_init = []
        kwargs_lens_sigma = []
        kwargs_lower_lens = []
        kwargs_upper_lens = []
        fixed_lens.append({})
        kwargs_lens_init.append({"theta_E": 1.0, "gamma": 2, "center_x": 0,
                                 "center_y": 0, "e1": 0, "e2": 0.0})
        # error
        kwargs_lens_sigma.append({"theta_E": 0.2, "e1": 0.1, "e2": 0.1,
                                  "gamma": 0.1, "center_x": 0.1, "center_y": 0.1})
        # lower limit
        kwargs_lower_lens.append({"theta_E": 0.01, "e1": -0.5, "e2": -0.5,
                                  "gamma": 1.5, "center_x": -10, "center_y": -10})
        # upper limit
        kwargs_upper_lens.append({"theta_E": 10, "e1": 0.5, "e2": 0.5,
                                  "gamma": 2.5, "center_x": 10, "center_y": 10})
        # second choice: SHEAR
        fixed_lens.append({"ra_0": 0, "dec_0": 0})
        kwargs_lens_init.append({"e1": 0.0, "e2": 0.0})
        kwargs_lens_sigma.append({"e1": 0.1, "e2": 0.1})
        kwargs_lower_lens.append({"e1": -0.2, "e2": -0.2})
        kwargs_upper_lens.append({"e1": 0.2, "e2": 0.2})
        lens_params = [kwargs_lens_init, kwargs_lens_sigma, fixed_lens,
                       kwargs_lower_lens, kwargs_upper_lens]

        point_source_list = ["LENSED_POSITION"]
        fixed_ps = [{"ra_image": ximg, "dec_image": yimg}]
        kwargs_ps_init = fixed_ps
        # let some freedom in how well the actual image positions are
        # matching those given by the data (indicated as 'ra_image', 'dec_image'
        # and held fixed while fitting)
        kwargs_ps_sigma = [{"ra_image": 0.01 * np.ones(len(ximg)),
                            "dec_image": 0.01 * np.ones(len(ximg))}]
        kwargs_lower_ps = [{"ra_image": -10 * np.ones(len(ximg)),
                            "dec_image": -10 * np.ones(len(ximg))}]
        kwargs_upper_ps = [{"ra_image": 10 * np.ones(len(ximg)),
                            "dec_image": 10 * np.ones(len(ximg))}]
        ps_params = [kwargs_ps_init, kwargs_ps_sigma, fixed_ps,
                     kwargs_lower_ps, kwargs_upper_ps]

        fixed_cosmo = {}
        kwargs_cosmo_init = {"D_dt": 5000,
                             "delta_x_image": np.zeros_like(ximg),
                             "delta_y_image": np.zeros_like(ximg)}
        kwargs_cosmo_sigma = {"D_dt": 10000,
                              "delta_x_image": np.ones_like(ximg) * args["astrometry_sigma"],
                              "delta_y_image": np.ones_like(ximg) * args["astrometry_sigma"]}
        kwargs_lower_cosmo = {"D_dt": 0,
                              "delta_x_image": np.ones_like(ximg) * (-1),
                              "delta_y_image": np.ones_like(ximg) * (-1)}
        kwargs_upper_cosmo = {"D_dt": 10000,
                              "delta_x_image": np.ones_like(ximg) * (1),
                              "delta_y_image": np.ones_like(ximg) * (1)}
        cosmo_params = [kwargs_cosmo_init, kwargs_cosmo_sigma, fixed_cosmo,
                        kwargs_lower_cosmo, kwargs_upper_cosmo]

        kwargs_params = {"lens_model": lens_params,
                         "point_source_model": ps_params,
                         "cosmography": cosmo_params}

        # setup options for likelihood and parameter sampling
        kwargs_constraints = {
            # any proposed lens model must satisfy the image positions
            # appearing at the position of the point sources being sampled
            "num_point_source_list": [int(args["nimgs"])],
            "solver_type": "PROFILE_SHEAR",
            "cosmo_type": "D_dt",  # sampling of the time-delay distance
            # explicit modelling of the astrometric imperfection of
            # the point source positions
            "point_source_offset": True,
        }
        kwargs_likelihood = {
            "check_bounds": True,
            "point_source_likelihood": True,
            "position_uncertainty": args["astrometry_sigma"],
            "check_solver": True,
            "solver_tolerance": 0.001,
            "time_delay_likelihood": True,
            "image_likelihood": False,  # this needs to be explicitly given when not having imaging data
            "flux_ratio_likelihood": True,  # enables the flux ratio likelihood
        }
        kwargs_data_joint = {
            "time_delays_measured": d_dt,
            "time_delays_uncertainties": d_dt_sigma,
            "flux_ratios": flux_ratios,
            "flux_ratio_errors": flux_ratio_errors,
        }
        kwargs_model = {"lens_model_list": lens_model_list,
                        "point_source_model_list": point_source_list}
        mpi = False  # MPI possible, but not supported through this notebook.

        fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model,
                                      kwargs_constraints, kwargs_likelihood,
                                      kwargs_params)
        fitting_kwargs_list = [
            # ['update_settings', {'lens_add_fixed': [[0, ['gamma']]]}],
            ["PSO", {"sigma_scale": 1.0, "n_particles": 100, "n_iterations": 100}]
        ]
        chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = \
            fitting_seq.fit_sequence(fitting_kwargs_list)
        lens_result, source_result, lens_light_result, ps_result, cosmo_result = \
            fitting_seq.best_fit()

        # and now we run the MCMC
        fitting_kwargs_list = [
            ["PSO", {"sigma_scale": 0.1, "n_particles": 100, "n_iterations": 100}],
            ["MCMC", {"n_burn": 200, "n_run": 200, "walkerRatio": 10,
                      "sigma_scale": 0.1}],
        ]
        chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = \
            fitting_seq.fit_sequence(fitting_kwargs_list)
        lens_result, source_result, lens_light_result, ps_result, cosmo_result = \
            fitting_seq.best_fit()
        # print("number of non-linear parameters in the MCMC process: ", len(param_mcmc))
        # print("parameters in order: ", param_mcmc)
        print("number of evaluations in the MCMC process: ", np.shape(samples_mcmc)[0])

        param = Param(kwargs_model, fixed_lens, kwargs_fixed_ps=fixed_ps,
                      kwargs_fixed_cosmo=fixed_cosmo,
                      kwargs_lens_init=lens_result, **kwargs_constraints)
        # the number of non-linear parameters and their names
        num_param, param_list = param.num_param()
        lensAnalysis = LensAnalysis(kwargs_model)
        mcmc_new_list = []
        labels_new = [r"$\gamma$", r"$\phi_{ext}$", r"$\gamma_{ext}$",
                      r"$D_{\Delta t}$"]
        # per-sample containers (needed for the indexed assignments below)
        D_dt = np.zeros(len(samples_mcmc))
        theta_E = np.zeros(len(samples_mcmc))
        gamma = np.zeros(len(samples_mcmc))
        e1 = np.zeros(len(samples_mcmc))
        e2 = np.zeros(len(samples_mcmc))
        for i in range(len(samples_mcmc)):
            # transform the parameter position of the MCMC chain in a
            # lenstronomy convention with keyword arguments
            kwargs_lens_out, kwargs_light_source_out, kwargs_light_lens_out, \
                kwargs_ps_out, kwargs_cosmo = param.args2kwargs(samples_mcmc[i])
            D_dt[i] = kwargs_cosmo["D_dt"]
            gamma[i] = kwargs_lens_out[0]["gamma"]
            theta_E[i] = kwargs_lens_out[0]['theta_E']
            e1[i] = kwargs_lens_out[0]['e1']
            e2[i] = kwargs_lens_out[0]['e2']
            phi_ext, gamma_ext = lensAnalysis._lensModelExtensions.external_shear(
                kwargs_lens_out)
        # plot = corner.corner(mcmc_new_list, labels=labels_new, show_titles=True)
        # and here the predicted angular diameter distance from a
        # default cosmology (attention for experimenter bias!)
        cosmo = FlatLambdaCDM(H0=71, Om0=0.3089, Ob0=0.)
        lensCosmo = LensCosmo(cosmo=cosmo, z_lens=z_lens, z_source=z_source)
        gamma = np.mean(gamma)
        phi_ext = np.mean(phi_ext)
        gamma_ext = np.mean(gamma_ext)
        theta_E = np.mean(theta_E)
        D_dt = np.mean(D_dt)
        results["gamma"].append(gamma)
        results["phi_ext"].append(phi_ext)
        results["gamma_ext"].append(gamma_ext)
        results["theta_E"].append(theta_E)
        results["D_dt"].append(lensCosmo.D_dt)

    with open("./quasars_%s_nimgs_%s_%s.json" % (args["los"], args["nimgs"],
                                                 args["version"]), 'w') as fout:
        json.dump(results, fout)
def test_fitting_sequence(self):
    # kwargs_init = [self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps]
    lens_sigma = [{'theta_E': 0.1, 'gamma': 0.1, 'e1': 0.1, 'e2': 0.1,
                   'center_x': 0.1, 'center_y': 0.1},
                  {'e1': 0.1, 'e2': 0.1}]
    lens_lower = [{'theta_E': 0., 'gamma': 1.5, 'center_x': -2, 'center_y': -2,
                   'e1': -0.4, 'e2': -0.4},
                  {'e1': -0.3, 'e2': -0.3}]
    lens_upper = [{'theta_E': 10., 'gamma': 2.5, 'center_x': 2, 'center_y': 2,
                   'e1': 0.4, 'e2': 0.4},
                  {'e1': 0.3, 'e2': 0.3}]
    source_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1,
                     'center_y': 0.1, 'e1': 0.1, 'e2': 0.1}]
    source_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2,
                     'center_y': -2, 'e1': -0.4, 'e2': -0.4}]
    source_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2,
                     'center_y': 2, 'e1': 0.4, 'e2': 0.4}]
    lens_light_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1,
                         'center_y': 0.1}]
    lens_light_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2,
                         'center_y': -2}]
    lens_light_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2,
                         'center_y': 2}]
    ps_sigma = [{'ra_source': 1, 'dec_source': 1, 'point_amp': 1}]

    lens_param = self.kwargs_lens, lens_sigma, [{}, {'ra_0': 0, 'dec_0': 0}], \
        lens_lower, lens_upper
    source_param = self.kwargs_source, source_sigma, [{}], source_lower, source_upper
    lens_light_param = self.kwargs_lens_light, lens_light_sigma, \
        [{'center_x': 0}], lens_light_lower, lens_light_upper
    ps_param = self.kwargs_ps, ps_sigma, [{}], self.kwargs_ps, self.kwargs_ps

    kwargs_params = {'lens_model': lens_param,
                     'source_model': source_param,
                     'lens_light_model': lens_light_param,
                     'point_source_model': ps_param,
                     # 'cosmography': cosmo_param
                     }
    # kwargs_params = [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_init, kwargs_init]
    image_band = [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]
    multi_band_list = [image_band]
    kwargs_data_joint = {'multi_band_list': multi_band_list,
                         'multi_band_type': 'multi-linear'}
    fittingSequence = FittingSequence(kwargs_data_joint, self.kwargs_model,
                                      self.kwargs_constraints,
                                      self.kwargs_likelihood, kwargs_params)

    kwargs_result = fittingSequence.best_fit(bijective=False)
    lens_temp = kwargs_result['kwargs_lens']
    npt.assert_almost_equal(lens_temp[0]['theta_E'],
                            self.kwargs_lens[0]['theta_E'], decimal=2)

    logL = fittingSequence.best_fit_likelihood
    print(logL, 'test')
    # print(lens_temp, source_temp, lens_light_temp, ps_temp, cosmo_temp)
    assert logL < 0
    bic = fittingSequence.bic
    assert bic > 0
    # npt.assert_almost_equal(bic, 20000000220.29376, decimal=-4)
    # npt.assert_almost_equal(logL, -10000000061.792593, decimal=-4)

    n_p = 2
    n_i = 2
    fitting_list = []
    kwargs_pso = {'sigma_scale': 1, 'n_particles': n_p, 'n_iterations': n_i}
    fitting_list.append(['PSO', kwargs_pso])
    kwargs_mcmc = {'sigma_scale': 0.1, 'n_burn': 1, 'n_run': 1, 'walkerRatio': 2}
    fitting_list.append(['MCMC', kwargs_mcmc])
    kwargs_mcmc['re_use_samples'] = True
    fitting_list.append(['MCMC', kwargs_mcmc])
    kwargs_mcmc['sampler_type'] = 'EMCEE'
    fitting_list.append(['MCMC', kwargs_mcmc])
    kwargs_align = {'lowerLimit': -0.1, 'upperLimit': 0.1, 'n_particles': 2,
                    'n_iterations': 2}
    fitting_list.append(['align_images', kwargs_align])
    kwargs_psf_iter = {'num_iter': 2, 'psf_iter_factor': 0.5,
                       'stacking_method': 'mean'}
    fitting_list.append(['psf_iteration', kwargs_psf_iter])
    fitting_list.append(['restart', None])
    fitting_list.append(['fix_not_computed', {'free_bands': [True]}])
    n_sersic_overwrite = 4
    kwargs_update = {
        'lens_light_add_fixed': [[0, ['n_sersic'], [n_sersic_overwrite]]],
        'lens_light_remove_fixed': [[0, ['center_x']]],
        'change_source_lower_limit': [[0, ['n_sersic'], [0.1]]],
        'change_source_upper_limit': [[0, ['n_sersic'], [10]]]
    }
    fitting_list.append(['update_settings', kwargs_update])
    # kwargs_model = {}, kwargs_constraints = {}, kwargs_likelihood = {}, lens_add_fixed = [],
    # source_add_fixed = [], lens_light_add_fixed = [], ps_add_fixed = [], cosmo_add_fixed = [],
    # lens_remove_fixed = [], source_remove_fixed = [], lens_light_remove_fixed = [],
    # ps_remove_fixed = [], cosmo_remove_fixed = []

    chain_list = fittingSequence.fit_sequence(fitting_list)
    lens_fixed, source_fixed, lens_light_fixed, ps_fixed, special_fixed, \
        extinction_fixed = fittingSequence._updateManager._fixed_kwargs
    kwargs_result = fittingSequence.best_fit(bijective=False)
    npt.assert_almost_equal(kwargs_result['kwargs_lens'][0]['theta_E'],
                            self.kwargs_lens[0]['theta_E'], decimal=1)
    npt.assert_almost_equal(
        fittingSequence._updateManager._lens_light_fixed[0]['n_sersic'],
        n_sersic_overwrite, decimal=8)
    npt.assert_almost_equal(lens_light_fixed[0]['n_sersic'], 4, decimal=-1)
    assert fittingSequence._updateManager._lower_kwargs[1][0]['n_sersic'] == 0.1
    assert fittingSequence._updateManager._upper_kwargs[1][0]['n_sersic'] == 10

    # Nested sampler tests
    # further decrease the parameter space for nested samplers to run faster
    fitting_list2 = []
    kwargs_update2 = {
        'ps_add_fixed': [[0, ['ra_source', 'dec_source'], [0, 0]]],
        'lens_light_add_fixed': [[0, ['n_sersic', 'R_sersic', 'center_x', 'center_y'],
                                  [4, .1, 0, 0]]],
        'source_add_fixed': [[0, ['R_sersic', 'e1', 'e2', 'center_x', 'center_y'],
                              [.6, .1, .1, 0, 0]]],
        'lens_add_fixed': [[0, ['gamma', 'theta_E', 'e1', 'e2', 'center_x', 'center_y'],
                            [1.8, 1., .1, .1, 0, 0]],
                           [1, ['e1', 'e2'], [0.01, 0.01]]],
        'change_source_lower_limit': [[0, ['n_sersic'], [2.9]]],
        'change_source_upper_limit': [[0, ['n_sersic'], [3.1]]]
    }
    fitting_list2.append(['update_settings', kwargs_update2])
    kwargs_multinest = {
        'sampler_type': 'MULTINEST',
        'kwargs_run': {
            'n_live_points': 10,
            'evidence_tolerance': 0.5,
            'sampling_efficiency': 0.8,  # 1 for posterior-only, 0 for evidence-only
            'importance_nested_sampling': False,
            'multimodal': True,
            'const_efficiency_mode': False,  # reduce sampling_efficiency to 5% when True
        },
        'remove_output_dir': True,
    }
    fitting_list2.append(['nested_sampling', kwargs_multinest])
    kwargs_dynesty = {
        'sampler_type': 'DYNESTY',
        'kwargs_run': {
            'dlogz_init': 0.01,
            'nlive_init': 3,
            'nlive_batch': 3,
            'maxbatch': 1,
        },
    }
    fitting_list2.append(['nested_sampling', kwargs_dynesty])
    kwargs_dypolychord = {
        'sampler_type': 'DYPOLYCHORD',
        'kwargs_run': {
            'ninit': 8,
            'nlive_const': 10,
            # 'seed_increment': 1,
            'resume_dyn_run': False,
            # 'init_step': 10,
        },
        'polychord_settings': {
            'seed': 1,
            # 'num_repeats': 20
        },
        'dypolychord_dynamic_goal': 0.8,  # 1 for posterior-only, 0 for evidence-only
        'remove_output_dir': True,
    }
    fitting_list2.append(['nested_sampling', kwargs_dypolychord])

    chain_list2 = fittingSequence.fit_sequence(fitting_list2)
    kwargs_fixed = fittingSequence._updateManager._fixed_kwargs
    npt.assert_almost_equal(kwargs_fixed[0][1]['e1'], 0.01, decimal=2)
    assert fittingSequence._updateManager._lower_kwargs[1][0]['n_sersic'] == 2.9
    assert fittingSequence._updateManager._upper_kwargs[1][0]['n_sersic'] == 3.1

    kwargs_test = {'kwargs_lens': 1}
    fittingSequence.update_state(kwargs_test)
    kwargs_out = fittingSequence.best_fit(bijective=True)
    assert kwargs_out['kwargs_lens'] == 1
def main():
    args = script_utils.parse_inference_args()
    test_cfg = TestConfig.from_file(args.test_config_file_path)
    baobab_cfg = BaobabConfig.from_file(test_cfg.data.test_baobab_cfg_path)
    cfg = TrainValConfig.from_file(test_cfg.train_val_config_file_path)
    # Set device and default data type
    device = torch.device(test_cfg.device_type)
    if device.type == 'cuda':
        torch.set_default_tensor_type('torch.cuda.' + cfg.data.float_type)
    else:
        torch.set_default_tensor_type('torch.' + cfg.data.float_type)
    script_utils.seed_everything(test_cfg.global_seed)

    ############
    # Data I/O #
    ############
    train_data = XYData(is_train=True,
                        Y_cols=cfg.data.Y_cols,
                        float_type=cfg.data.float_type,
                        define_src_pos_wrt_lens=cfg.data.define_src_pos_wrt_lens,
                        rescale_pixels=cfg.data.rescale_pixels,
                        rescale_pixels_type=cfg.data.rescale_pixels_type,
                        log_pixels=cfg.data.log_pixels,
                        add_pixel_noise=cfg.data.add_pixel_noise,
                        eff_exposure_time=cfg.data.eff_exposure_time,
                        train_Y_mean=None,
                        train_Y_std=None,
                        train_baobab_cfg_path=cfg.data.train_baobab_cfg_path,
                        val_baobab_cfg_path=None,
                        for_cosmology=False)
    # Define val data and loader
    test_data = XYData(is_train=False,
                       Y_cols=cfg.data.Y_cols,
                       float_type=cfg.data.float_type,
                       define_src_pos_wrt_lens=cfg.data.define_src_pos_wrt_lens,
                       rescale_pixels=cfg.data.rescale_pixels,
                       rescale_pixels_type=cfg.data.rescale_pixels_type,
                       log_pixels=cfg.data.log_pixels,
                       add_pixel_noise=cfg.data.add_pixel_noise,
                       eff_exposure_time=cfg.data.eff_exposure_time,
                       train_Y_mean=train_data.train_Y_mean,
                       train_Y_std=train_data.train_Y_std,
                       train_baobab_cfg_path=cfg.data.train_baobab_cfg_path,
                       val_baobab_cfg_path=test_cfg.data.test_baobab_cfg_path,
                       for_cosmology=True)
    master_truth = test_data.Y_df
    master_truth = metadata_utils.add_qphi_columns(master_truth)
    master_truth = metadata_utils.add_gamma_psi_ext_columns(master_truth)
    # Figure out how many lenses BNN will predict on (must be consecutive)
    if test_cfg.data.lens_indices is None:
        if args.lens_indices_path is None:
            # Test on all n_test lenses in the test set
            n_test = test_cfg.data.n_test
            lens_range = range(n_test)
        else:
            # Test on the lens indices in a text file at the specified path
            lens_range = []
            with open(args.lens_indices_path, "r") as f:
                for line in f:
                    lens_range.append(int(line.strip()))
            n_test = len(lens_range)
            print("Performing H0 inference on {:d} specified lenses...".format(n_test))
    else:
        if args.lens_indices_path is None:
            # Test on the lens indices specified in the test config file
            lens_range = test_cfg.data.lens_indices
            n_test = len(lens_range)
            print("Performing H0 inference on {:d} specified lenses...".format(n_test))
        else:
            raise ValueError(
                "Specific lens indices were specified in both the test config file and the command-line argument.")
    batch_size = max(lens_range) + 1
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False,
                             drop_last=True)

    # Output directory into which the H0 histograms and H0 samples will be saved
    out_dir = test_cfg.out_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print("Destination folder path: {:s}".format(out_dir))
    else:
        raise OSError("Destination folder already exists.")

    #####################
    # Parameter penalty #
    #####################
    # Instantiate original loss function with all BNN-predicted params
    orig_Y_cols = cfg.data.Y_cols
    loss_fn = getattr(h0rton.losses, cfg.model.likelihood_class)(
        Y_dim=test_data.Y_dim, device=device)
    # Not all predicted params will be sampled via MCMC
    params_to_remove = []  # 'lens_light_R_sersic', 'src_light_R_sersic'
    mcmc_Y_cols = [col for col in orig_Y_cols if col not in params_to_remove]
    mcmc_Y_dim = len(mcmc_Y_cols)
    # Instantiate loss function with just the MCMC params
    mcmc_loss_fn = getattr(h0rton.losses, cfg.model.likelihood_class)(
        Y_dim=test_data.Y_dim - len(params_to_remove), device=device)
    remove_param_idx, remove_idx = mcmc_utils.get_idx_for_params(
        mcmc_loss_fn.out_dim, orig_Y_cols, params_to_remove,
        cfg.model.likelihood_class)
    mcmc_train_Y_mean = np.delete(train_data.train_Y_mean, remove_param_idx)
    mcmc_train_Y_std = np.delete(train_data.train_Y_std, remove_param_idx)
    parameter_penalty = mcmc_utils.HybridBNNPenalty(
        mcmc_Y_cols, cfg.model.likelihood_class, mcmc_train_Y_mean,
        mcmc_train_Y_std, test_cfg.h0_posterior.exclude_velocity_dispersion,
        device)
    custom_logL_addition = parameter_penalty.evaluate
    null_spread = False

    ###################
    # BNN predictions #
    ###################
    # Instantiate BNN model
    net = getattr(h0rton.models, cfg.model.architecture)(
        num_classes=loss_fn.out_dim, dropout_rate=cfg.model.dropout_rate)
    net.to(device)
    # Load trained weights from saved state
    net, epoch = train_utils.load_state_dict_test(test_cfg.state_dict_path, net,
                                                  cfg.optim.n_epochs, device)
    # When only generating BNN predictions (and not running MCMC), we can afford
    # more n_dropout; otherwise, we fix n_dropout = mcmc_Y_dim + 1
    if test_cfg.export.pred:
        n_dropout = 20
        n_samples_per_dropout = test_cfg.numerics.mcmc.walkerRatio
    else:
        n_walkers = test_cfg.numerics.mcmc.walkerRatio * (mcmc_Y_dim + 1)  # (BNN params + D_dt) times walker ratio
        n_dropout = n_walkers // test_cfg.numerics.mcmc.walkerRatio
        n_samples_per_dropout = test_cfg.numerics.mcmc.walkerRatio
    # Initialize arrays that will store samples and BNN predictions
    init_pos = np.empty([batch_size, n_dropout, n_samples_per_dropout, mcmc_Y_dim])
    mcmc_pred = np.empty([batch_size, n_dropout, mcmc_loss_fn.out_dim])
    with torch.no_grad():
        net.train()
        # Send some empty forward passes through the test data without backprop
        # to adjust batchnorm weights
        # (This is often not necessary. Beware if using for just 1 lens.)
        for nograd_pass in range(5):
            for X_, Y_ in test_loader:
                X = X_.to(device)
                _ = net(X)
        # Obtain MC dropout samples
        for d in range(n_dropout):
            net.eval()
            for X_, Y_ in test_loader:
                X = X_.to(device)
                Y = Y_.to(device)
                pred = net(X)
                break
            mcmc_pred_d = pred.cpu().numpy()
            # Replace BNN posterior's primary gaussian mean with truth values
            if test_cfg.lens_posterior_type == 'default_with_truth_mean':
                mcmc_pred_d[:, :len(mcmc_Y_cols)] = Y[:, :len(mcmc_Y_cols)].cpu().numpy()
            # Leave only the MCMC parameters in pred
            mcmc_pred_d = mcmc_utils.remove_parameters_from_pred(
                mcmc_pred_d, remove_idx, return_as_tensor=False)
            # Populate pred that will define the MCMC penalty function
            mcmc_pred[:, d, :] = mcmc_pred_d
            # Instantiate posterior to generate BNN samples, which will serve
            # as initial positions for walkers
            bnn_post = getattr(h0rton.h0_inference.gaussian_bnn_posterior_cpu,
                               loss_fn.posterior_name + 'CPU')(
                mcmc_Y_dim, mcmc_train_Y_mean, mcmc_train_Y_std)
            bnn_post.set_sliced_pred(mcmc_pred_d)
            init_pos[:, d, :, :] = bnn_post.sample(
                n_samples_per_dropout,
                sample_seed=test_cfg.global_seed + d)  # contains just the lens model params, no D_dt
            gc.collect()
    # Terminate right after generating BNN predictions (no MCMC)
    if test_cfg.export.pred:
        import sys
        samples_path = os.path.join(out_dir, 'samples.npy')
        np.save(samples_path, init_pos)
        sys.exit()

    #############
    # MCMC loop #
    #############
    # Convolve MC dropout iterates with aleatoric samples
    init_pos = init_pos.transpose(0, 3, 1, 2).reshape(
        [batch_size, mcmc_Y_dim, -1]).transpose(0, 2, 1)  # [batch_size, n_samples, mcmc_Y_dim]
    init_D_dt = np.random.uniform(0.0, 15000.0, size=(batch_size, n_walkers, 1))
    pred_mean = np.mean(init_pos, axis=1)  # [batch_size, mcmc_Y_dim]
    # Define assumed model profiles
    kwargs_model = dict(lens_model_list=['PEMD', 'SHEAR'],
                        point_source_model_list=['SOURCE_POSITION'],
                        source_light_model_list=['SERSIC_ELLIPSE'])
    astro_sig = test_cfg.image_position_likelihood.sigma  # astrometric uncertainty
    # Get H0 samples for each system
    if not test_cfg.time_delay_likelihood.baobab_time_delays:
        if 'abcd_ordering_i' not in master_truth:
            raise ValueError(
                "If the time delay measurements were not generated using Baobab, the user must specify the order of image positions in which the time delays are listed, in order of increasing dec.")
    kwargs_lens_eqn_solver = {
        'min_distance': 0.05,
        'search_window': baobab_cfg.instrument['pixel_scale'] * baobab_cfg.image['num_pix'],
        'num_iter_max': 200
    }
    total_progress = tqdm(total=n_test)
    realized_time_delays = pd.read_csv(
        test_cfg.error_model.realized_time_delays, index_col=None)
    # For each lens system...
    for i, lens_i in enumerate(lens_range):
        # Each lens gets a unique random state for time delay measurement error realizations.
        # rs_lens = np.random.RandomState(lens_i)  # replaced with externally rendered time delays

        ###########################
        # Relevant data and prior #
        ###########################
        data_i = master_truth.iloc[lens_i].copy()
        # Set BNN pred defining parameter penalty for this lens, batch processes across n_dropout
        parameter_penalty.set_bnn_post_params(mcmc_pred[lens_i, :, :])
        # Initialize lens model params walkers at the predictive mean
        init_info = dict(zip(mcmc_Y_cols,
                             pred_mean[lens_i, :] * mcmc_train_Y_std + mcmc_train_Y_mean))
        lcdm = LCDM(z_lens=data_i['z_lens'], z_source=data_i['z_src'], flat=True)
        true_img_dec = literal_eval(data_i['y_image'])
        n_img = len(true_img_dec)
        measured_td_sig = test_cfg.time_delay_likelihood.sigma
        measured_td_wrt0 = np.array(
            literal_eval(realized_time_delays.iloc[lens_i]['measured_td_wrt0']))
        kwargs_data_joint = dict(
            time_delays_measured=measured_td_wrt0,
            time_delays_uncertainties=measured_td_sig,
        )

        #############################
        # Parameter init and bounds #
        #############################
        lens_kwargs = mcmc_utils.get_lens_kwargs(init_info, null_spread=null_spread)
        ps_kwargs = mcmc_utils.get_ps_kwargs_src_plane(init_info, astro_sig)
        src_light_kwargs = mcmc_utils.get_light_kwargs(
            init_info['src_light_R_sersic'], null_spread=null_spread)
        special_kwargs = mcmc_utils.get_special_kwargs(n_img, astro_sig)  # image position offset and time delay distance, aka the "special" parameters
        kwargs_params = {
            'lens_model': lens_kwargs,
            'point_source_model': ps_kwargs,
            'source_model': src_light_kwargs,
            'special': special_kwargs,
        }
        if test_cfg.numerics.solver_type == 'NONE':
            solver_type = 'NONE'
        else:
            solver_type = 'PROFILE_SHEAR' if n_img == 4 else 'CENTER'
        # solver_type = 'NONE'
        kwargs_constraints = {
            'num_point_source_list': [n_img],
            'Ddt_sampling': True,
            'solver_type': solver_type,
        }
        kwargs_likelihood = {
            'time_delay_likelihood': True,
            'sort_images_by_dec': True,
            'prior_lens': [],
            'prior_special': [],
            'check_bounds': True,
            'check_matched_source_position': False,
            'source_position_tolerance': 0.01,
            'source_position_sigma': 0.01,
            'source_position_likelihood': False,
            'custom_logL_addition': custom_logL_addition,
            'kwargs_lens_eqn_solver': kwargs_lens_eqn_solver
        }

        ###########################
        # MCMC posterior sampling #
        ###########################
        fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model,
                                      kwargs_constraints, kwargs_likelihood,
                                      kwargs_params, verbose=False, mpi=False)
        if i == 0:
            param_class = fitting_seq._updateManager.param_class
            n_params, param_class_Y_cols = param_class.num_param()
            init_pos = mcmc_utils.reorder_to_param_class(
                mcmc_Y_cols, param_class_Y_cols, init_pos, init_D_dt)
        # MCMC sample from the post-processed BNN posterior jointly with cosmology
        lens_i_start_time = time.time()
        if test_cfg.lens_posterior_type == 'default':
            test_cfg.numerics.mcmc.update(init_samples=init_pos[lens_i, :, :])
        fitting_kwargs_list_mcmc = [['MCMC', test_cfg.numerics.mcmc]]
        # try:
        with script_utils.HiddenPrints():
            chain_list_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list_mcmc)
            kwargs_result_mcmc = fitting_seq.best_fit()
        lens_i_end_time = time.time()
        inference_time = (lens_i_end_time - lens_i_start_time) / 60.0  # min

        #############################
        # Plotting the MCMC samples #
        #############################
        # sampler_type : 'EMCEE'
        # samples_mcmc : np.array of shape `[n_mcmc_eval, n_params]`
        # param_mcmc : list of str of length n_params, the parameter names
        sampler_type, samples_mcmc, param_mcmc, _ = chain_list_mcmc[0]
        new_samples_mcmc = mcmc_utils.postprocess_mcmc_chain(
kwargs_result_mcmc, samples_mcmc, kwargs_model, lens_kwargs[2], ps_kwargs[2], src_light_kwargs[2], special_kwargs[2], kwargs_constraints) # Plot D_dt histogram D_dt_samples = new_samples_mcmc['D_dt'].values true_D_dt = lcdm.D_dt(H_0=data_i['H0'], Om0=0.3) data_i['D_dt'] = true_D_dt # Export D_dt samples for this lens lens_inference_dict = dict( D_dt_samples=D_dt_samples, # kappa_ext=0 for these samples inference_time=inference_time, true_D_dt=true_D_dt, ) lens_inference_dict_save_path = os.path.join( out_dir, 'D_dt_dict_{0:04d}.npy'.format(lens_i)) np.save(lens_inference_dict_save_path, lens_inference_dict) # Optionally export the MCMC samples if test_cfg.export.mcmc_samples: mcmc_samples_path = os.path.join( out_dir, 'mcmc_samples_{0:04d}.csv'.format(lens_i)) new_samples_mcmc.to_csv(mcmc_samples_path, index=None) # Optionally export the D_dt histogram if test_cfg.export.D_dt_histogram: cleaned_D_dt_samples = h0_utils.remove_outliers_from_lognormal( D_dt_samples, 3) _ = plotting_utils.plot_D_dt_histogram(cleaned_D_dt_samples, lens_i, true_D_dt, save_dir=out_dir) # Optionally export the plot of MCMC chain if test_cfg.export.mcmc_chain: mcmc_chain_path = os.path.join( out_dir, 'mcmc_chain_{0:04d}.png'.format(lens_i)) plotting_utils.plot_mcmc_chain(chain_list_mcmc, mcmc_chain_path) # Optionally export posterior cornerplot of select lens model parameters with D_dt if test_cfg.export.mcmc_corner: mcmc_corner_path = os.path.join( out_dir, 'mcmc_corner_{0:04d}.png'.format(lens_i)) plotting_utils.plot_mcmc_corner( new_samples_mcmc[test_cfg.export.mcmc_cols], data_i[test_cfg.export.mcmc_cols], test_cfg.export.mcmc_col_labels, mcmc_corner_path) total_progress.update(1) gc.collect() realized_time_delays.to_csv(os.path.join(out_dir, 'realized_time_delays.csv'), index=None) total_progress.close()
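# Minimal sketch of how the per-lens D_dt dictionaries saved above can be read back;
# the path here is hypothetical. np.save pickles a plain dict, so np.load needs
# allow_pickle=True and .item() to recover the dictionary.
import numpy as np
lens_inference_dict = np.load('out_dir/D_dt_dict_0000.npy', allow_pickle=True).item()
D_dt_samples = lens_inference_dict['D_dt_samples']  # MCMC samples of the time-delay distance
true_D_dt = lens_inference_dict['true_D_dt']  # truth computed from the fiducial cosmology
print('Inference took {:.1f} min'.format(lens_inference_dict['inference_time']))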
class ForwardModel(bnn_inference.InferenceClass): """ A class that inherits from InferenceClass and adds the ability to forward model. """ def __init__(self, cfg, lite_class=False, test_set_path=None): """ Initialize the ForwardModel instance using the parameters of the configuration file. Parameters: cfg (dict): The dictionary obtained from reading the json config file. lite_class (bool): If True, do not bother loading the BNN model weights. This allows the user to save on memory, but will cause an error if the BNN samples have not already been drawn. test_set_path (str): The path to the set of images that the forward modeling image will be pulled from. If None, the path to the validation set images will be used. """ # Initialize the BNN inference class. super(ForwardModel, self).__init__(cfg, lite_class, test_set_path) # We will use the baobab code to generate our images and then calculate # the likelihood manually. # First we get the psf model self.baobab_cfg = configs.BaobabConfig.from_file( self.baobab_config_path) # Add the lens and source models specified in the config. Currently # no light model can be specified. Note that any self variable # starting with the prefix ls_ is for use with lenstronomy. self.ls_lens_model_list = [] fixed_lens = [] kwargs_lens_init = [] kwargs_lens_sigma = [] kwargs_lower_lens = [] kwargs_upper_lens = [] self.ls_source_model_list = [] fixed_source = [] kwargs_source_init = [] kwargs_source_sigma = [] kwargs_lower_source = [] kwargs_upper_source = [] # For now, each of the distribution options is hard-coded together # with reasonable choices for their parameters. if 'PEMD' in cfg['forward_mod_params']['lens_model_list']: self.ls_lens_model_list.append('PEMD') fixed_lens.append({}) kwargs_lens_init.append({ 'theta_E': 0.7, 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0., 'gamma': 2.0 }) kwargs_lens_sigma.append({ 'theta_E': .2, 'e1': 0.05, 'e2': 0.05, 'center_x': 0.05, 'center_y': 0.05, 'gamma': 0.2 }) kwargs_lower_lens.append({ 'theta_E': 0.01, 'e1': -0.5, 'e2': -0.5, 'center_x': -10, 'center_y': -10, 'gamma': 0.01 }) kwargs_upper_lens.append({ 'theta_E': 10., 'e1': 0.5, 'e2': 0.5, 'center_x': 10, 'center_y': 10, 'gamma': 10 }) if 'SHEAR_GAMMA_PSI' in cfg['forward_mod_params']['lens_model_list']: self.ls_lens_model_list.append('SHEAR_GAMMA_PSI') fixed_lens.append({'ra_0': 0, 'dec_0': 0}) kwargs_lens_init.append({'gamma_ext': 0.2, 'psi_ext': 0.0}) kwargs_lens_sigma.append({'gamma_ext': 0.1, 'psi_ext': 0.1}) kwargs_lower_lens.append({'gamma_ext': 0, 'psi_ext': -0.5 * np.pi}) kwargs_upper_lens.append({'gamma_ext': 10, 'psi_ext': 0.5 * np.pi}) if 'SERSIC_ELLIPSE' in cfg['forward_mod_params']['source_model_list']: self.ls_source_model_list.append('SERSIC_ELLIPSE') fixed_source.append({}) kwargs_source_init.append({ 'R_sersic': 0.2, 'n_sersic': 1, 'e1': 0, 'e2': 0, 'center_x': 0., 'center_y': 0 }) kwargs_source_sigma.append({ 'n_sersic': 0.5, 'R_sersic': 0.1, 'e1': 0.05, 'e2': 0.05, 'center_x': 0.2, 'center_y': 0.2 }) kwargs_lower_source.append({ 'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.001, 'n_sersic': .5, 'center_x': -10, 'center_y': -10 }) kwargs_upper_source.append({ 'e1': 0.5, 'e2': 0.5, 'R_sersic': 10, 'n_sersic': 5., 'center_x': 10, 'center_y': 10 }) # Feed all of the above params into lists self.ls_lens_params = [ kwargs_lens_init, kwargs_lens_sigma, fixed_lens, kwargs_lower_lens, kwargs_upper_lens ] self.ls_source_params = [ kwargs_source_init, kwargs_source_sigma, fixed_source, kwargs_lower_source, kwargs_upper_source ] self.ls_kwargs_params = {
'lens_model': self.ls_lens_params, 'source_model': self.ls_source_params } # Some of the likelihood parameters being used by Lenstronomy self.ls_kwargs_likelihood = {'source_marg': False} self.ls_kwargs_model = { 'lens_model_list': self.ls_lens_model_list, 'source_light_model_list': self.ls_source_model_list } # We will also need some of the noise kwargs. We will feed the # lenstronomy version straight to lenstronomy and the tensorflow # version to our pipeline for selecting the image. bandpass = self.baobab_cfg.survey_info.bandpass_list[0] detector = self.baobab_cfg.survey_object_dict[bandpass] detector_kwargs = detector.kwargs_single_band() self.noise_kwargs = self.baobab_cfg.get_noise_kwargs(bandpass) self.noise_function = noise_tf.NoiseModelTF(**self.noise_kwargs) self.ls_kwargs_psf = instantiate_PSF_kwargs( self.baobab_cfg.psf['type'], detector_kwargs['pixel_scale'], seeing=detector_kwargs['seeing'], kernel_size=detector.psf_kernel_size, which_psf_maps=self.baobab_cfg.psf['which_psf_maps']) # The kwargs for the numerics. These should match what was used # to generate the image. self.ls_kwargs_numerics = { 'supersampling_factor': (self.baobab_cfg.numerics.supersampling_factor), 'supersampling_convolution': False } # Pull the needed information from the config file. self.lens_params = self.cfg['dataset_params']['lens_params'] # Set flags to make sure things are initialized. self.image_selected = False self.sampler_init = False def select_image(self, image_index, block=True): """ Select the image to conduct forward modeling on. Parameters: image_index (int): The index of the image to use. block (bool): If true, block execution after each plt.show() command. """ # Load the metadata file metadata = pd.read_csv( os.path.join(self.cfg['validation_params']['root_path'], 'metadata.csv')) # Get the image filename img_filename = 'X_{0:07d}.npy'.format(image_index) # Load the true image. self.true_image = np.load( os.path.join(self.cfg['validation_params']['root_path'], img_filename)).astype(np.float32) # Show the image without noise print('True image without noise.') plt.imshow(self.true_image, cmap=cm.magma) plt.colorbar() plt.show(block=block) # Set the random seed since we will be using it to add # noise tf.random.set_seed(self.cfg['training_params']['random_seed']) # Add noise and show the noisy image self.true_image_noise = self.noise_function.add_noise( self.true_image).numpy() print('True image with noise.') plt.imshow(self.true_image_noise, cmap=cm.magma) plt.colorbar() plt.show(block=block) # Extract the data kwargs (including noise kwargs) being used # by lenstronomy.
_, _, ra_0, dec_0, _, _, Mpix2coord, _ = ( util.make_grid_with_coordtransform( numPix=self.baobab_cfg.image['num_pix'], deltapix=self.baobab_cfg.instrument['pixel_scale'], center_ra=0, center_dec=0, subgrid_res=1, inverse=self.baobab_cfg.image['inverse'])) # Update the lenstronomy kwargs with the image information noise_dict = noise_lenstronomy.get_noise_sigma2_lenstronomy( self.true_image_noise, **self.noise_kwargs) self.ls_kwargs_data = { 'background_rms': np.sqrt(noise_dict['sky'] + noise_dict['readout']), 'exposure_time': (self.noise_kwargs['exposure_time'] * self.noise_kwargs['num_exposures']), 'ra_at_xy_0': ra_0, 'dec_at_xy_0': dec_0, 'transform_pix2angle': Mpix2coord, 'image_data': self.true_image_noise } self.ls_multi_band_list = [[ self.ls_kwargs_data, self.ls_kwargs_psf, self.ls_kwargs_numerics ]] # Find, save, and print the parameters for this image. image_data = metadata[metadata['img_filename'] == img_filename] self.true_values = image_data.to_dict(orient='index')[image_index] print('Image data') print(self.true_values) # Note that image has been selected. self.image_selected = True def initialize_sampler(self, walker_ratio, chains_save_path): """ Initialize the sampler to be used by run_sampler. Parameters: walker_ratio (int): The number of walkers per free parameter. Must be at least 2. chains_save_path (str): An h5 path specifying where to save the sampler chains. If a sampler chain is already present in the path it will be loaded. """ if self.image_selected is False: raise RuntimeError('Select an image before starting your sampler') # Set up the fitting sequence and fitting kwargs from lenstronomy self.walker_ratio = walker_ratio self.chains_save_path = chains_save_path ls_kwargs_data_joint = { 'multi_band_list': self.ls_multi_band_list, 'multi_band_type': 'multi-linear' } ls_kwargs_constraints = {} self.fitting_seq = FittingSequence(ls_kwargs_data_joint, self.ls_kwargs_model, ls_kwargs_constraints, self.ls_kwargs_likelihood, self.ls_kwargs_params) self.sampler_init = True def run_sampler(self, n_samps): """ Run an emcee sampler to get a posterior on the model parameters. Parameters: n_samps (int): The number of samples to take """ if self.sampler_init is False: raise RuntimeError( 'Must initialize sampler before running sampler') # Notify user if chains were found if os.path.isfile(self.chains_save_path): print('Using chains found at %s' % (self.chains_save_path)) self.start_from_backup = True else: print('No chains found at %s' % (self.chains_save_path)) self.start_from_backup = False # Initialize the fitting kwargs to be passed to the lenstronomy # fitting sequence. We set burnin to 0 since we would like to be # responsible for the burnin. fitting_kwargs_list = [[ 'MCMC', { 'n_burn': 0, 'n_run': n_samps, 'walkerRatio': self.walker_ratio, 'sigma_scale': 0.1, 'backup_filename': self.chains_save_path, 'start_from_backup': self.start_from_backup } ]] chain_list = self.fitting_seq.fit_sequence(fitting_kwargs_list) # Extract the relevant outputs: self.chain_params = chain_list[0][2] # I want the walkers to be separate so I can choose my own burnin # adventure here.
self.chains = chain_list[0][1].reshape( (-1, len(self.chain_params) * self.walker_ratio, len(self.chain_params))) # Convert chain_params naming convention to the one used by baobab renamed_params = [] for param in self.chain_params: if 'lens0' in param: renamed_params.append('lens_mass_' + param[:-6]) if 'lens1' in param: renamed_params.append('external_shear_' + param[:-6]) if 'source_light0' in param: renamed_params.append('src_light_' + param[:-14]) self.chain_params = renamed_params def plot_chains(self, burnin=None, block=True): """ Plot the chains resulting from the emcee to figure out what the correct burnin is. Parameters: burnin (int): How many of the initial samples to drop as burnin block (bool): If true, block execution after plt.show() command """ # Extract and plot the chains if burnin is not None: chains = self.chains[burnin:] else: chains = self.chains for ci, chain in enumerate(chains.T): plt.plot(chain.T, '.') plt.title(self.chain_params[ci]) plt.ylabel(self.chain_params[ci]) plt.xlabel('sample') plt.axhline(self.true_values[self.chain_params[ci]], c='k') plt.show(block=block) def _correct_chains(self, chains, param_names, true_values): """ Correct the chains and true values so that their convention agrees with what was used to train the BNN. Parameters: chains (np.array): A numpy array containing the chain in the original parameter space. Dimensions should be (n_samples, n_params). param_names ([str,...]): A list of strings containing the names of each of the parameters in chains. true_values (np.array): A numpy array with the true values for each parameter in the untransformed parameterization. Should have dimensions (n_params). Returns: [str,...]: A list containing the corrected parameter names. Everything else is changed in place. TODO: Integrate this directly with the dataset code. """ # Go through the parameters and find which ones need to be corrected param_names = np.array(param_names) new_param_names = np.copy(param_names) dat_params = self.cfg['dataset_params'] # First get all of the parameters that were changed to a cartesian # format. for rat_param, ang_param, param_prefix in zip( dat_params['gampsi']['gampsi_params_rat'], dat_params['gampsi']['gampsi_params_ang'], dat_params['gampsi']['gampsi_parameter_prefixes']): # Pull the gamma and angle parameter. gamma = chains[:, param_names == rat_param] ang = chains[:, param_names == ang_param] # Calculate g1 and g2. g1 = gamma * np.cos(2 * ang) g2 = gamma * np.sin(2 * ang) # Change the name and the values new_param_names[param_names == rat_param] = param_prefix + '_g1' new_param_names[param_names == ang_param] = param_prefix + '_g2' chains[:, param_names == rat_param] = g1 chains[:, param_names == ang_param] = g2 # Make the same change in the true values gamma = true_values[param_names == rat_param] ang = true_values[param_names == ang_param] # Calculate g1 and g2. g1 = gamma * np.cos(2 * ang) g2 = gamma * np.sin(2 * ang) true_values[param_names == rat_param] = g1 true_values[param_names == ang_param] = g2 # Now get all of the parameters that were changed to log space. for log_param in dat_params['lens_params_log']: # Pull the parameter value value = chains[:, param_names == log_param] # Change the name and value new_param_names[param_names == log_param] = log_param + '_log' chains[:, param_names == log_param] = np.log(value) # Make the same change in the true values.
true_values[param_names == log_param] = np.log( true_values[param_names == log_param]) return new_param_names def plot_posterior_contours(self, burnin, num_samples, block=True, sample_save_dir=None, color_map=['#FFAA00', '#41b6c4'], plot_limits=None, truth_color='#000000', save_fig_path=None, dpi=400, fig=None, show_plot=True, plot_fow_model=True, add_legend=True, fontsize=12): """ Plot the corner plot of chains resulting from the emcee for the lens mass parameters. Parameters: burnin (int): How many of the initial samples to drop as burnin num_samples (int): The number of bnn samples to use for the contour block (bool): If true, block execution after plt.show() command sample_save_dir (str): A path to a folder to save/load the samples. If None samples will not be saved. Do not include .npy, this will be appended (since several files will be generated). color_map ([str,...]): A list of strings specifying the colors to use in the contour plots. plot_limits ([(float,float),..]): A list of float tuples that define the maximum and minimum plot range for each posterior parameter. truth_color (str): The color to use for plotting the truths in the corner plot. save_fig_path (str): If specified, the figure will be saved to that path. dpi (int): The dpi to use when generating the image. fig (matplotlib.Figure): The figure to use as a starting point. Best to leave this as None unless you're passing in another corner plot. show_plot (bool): Whether or not to show the plot or just return the figure. plot_fow_model (bool): Whether or not to plot the forward modeling posteriors. This is mostly here for plotting multiple BNN posteriors on one plot. add_legend (bool): Whether or not to add an auto-generated legend. fontsize (int): The fontsize for the corner plot labels. Returns: (matplotlib.pyplot.figure): The figure object containing the contours. """ # Get the chains from the samples chains = self.chains[burnin:].reshape(-1, len(self.chain_params)) # Keep only the parameters that our BNN is predicting pi_keep = [] chain_params_keep = [] for pi, param in enumerate(self.chain_params): if param in self.lens_params: pi_keep.append(pi) chain_params_keep.append(param) # Keep only the chains related to the parameters we want to look at.
chains = chains.T[pi_keep].T true_values_list = [] for param in chain_params_keep: true_values_list.append(self.true_values[param]) true_values_list = np.array(true_values_list) chain_params_keep = self._correct_chains(chains, chain_params_keep, true_values_list) # The final step is a simple reordering reordered_chains = np.zeros_like(chains) reordered_true_values = np.zeros_like(true_values_list) for pi, param in enumerate(chain_params_keep): fpi = self.final_params.index(param) reordered_chains[:, fpi] = chains[:, pi] reordered_true_values[fpi] = true_values_list[pi] # Make a corner plot for the BNN samples hist_kwargs = {'density': True, 'color': color_map[0]} self.gen_samples(num_samples, sample_save_dir=sample_save_dir, single_image=self.true_image_noise / np.std(self.true_image_noise)) corner_bnn_samples = self.predict_samps.reshape( -1, self.predict_samps.shape[-1]) fig = corner.corner(corner_bnn_samples, bins=20, labels=self.final_params_print_names, show_titles=False, plot_datapoints=False, label_kwargs=dict(fontsize=fontsize), truths=reordered_true_values, levels=[0.68, 0.95], dpi=dpi, color=color_map[0], fig=fig, fill_contours=True, range=plot_limits, truth_color=truth_color, hist_kwargs=hist_kwargs) # Now overlay the forward modeling samples if plot_fow_model: hist_kwargs['color'] = color_map[1] fig = corner.corner(reordered_chains, labels=self.final_params_print_names, bins=20, show_titles=False, plot_datapoints=False, label_kwargs=dict(fontsize=fontsize), truths=reordered_true_values, levels=[0.68, 0.95], dpi=dpi, color=color_map[1], fill_contours=True, range=plot_limits, truth_color=truth_color, hist_kwargs=hist_kwargs, fig=fig) left, bottom, width, height = [0.5725, 0.8, 0.15, 0.18] ax2 = fig.add_axes([left, bottom, width, height]) ax2.imshow(self.true_image_noise, cmap=cm.magma, origin='lower') # Add a nice legend to our contours handles = [ Line2D([0], [0], color=color_map[0], lw=10), Line2D([0], [0], color=color_map[1], lw=10) ] bnn_type = self.cfg['training_params']['bnn_type'] if bnn_type == 'gmm': bnn_type = 'GMM' else: bnn_type = bnn_type.capitalize() if add_legend: fig.legend(handles, [bnn_type + ' BNN', 'Forward Modeling'], loc=(0.55, 0.75), fontsize=20) if save_fig_path is not None: plt.savefig(save_fig_path) if show_plot: plt.show(block=block) return fig
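# A hedged usage sketch for the ForwardModel class above. The config dict, image
# index, walker settings, and save path are placeholder assumptions, not values
# from the original pipeline.
fm = ForwardModel(cfg, lite_class=False)
fm.select_image(image_index=12, block=False)  # pick the validation image to model
fm.initialize_sampler(walker_ratio=3, chains_save_path='chains/image_12.h5')
fm.run_sampler(n_samps=1000)  # emcee sampling through lenstronomy's FittingSequence
fm.plot_chains(burnin=None, block=False)  # inspect the walkers to choose a burnin
fig = fm.plot_posterior_contours(burnin=200, num_samples=1000)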
def test_minimizer(self): n_p = 2 n_i = 2 fitting_list = [] kwargs_simplex = {'n_iterations': n_i, 'method': 'Nelder-Mead'} fitting_list.append(['SIMPLEX', kwargs_simplex]) kwargs_simplex = {'n_iterations': n_i, 'method': 'Powell'} fitting_list.append(['SIMPLEX', kwargs_simplex]) kwargs_pso = { 'sigma_scale': 1, 'n_particles': n_p, 'n_iterations': n_i } fitting_list.append(['PSO', kwargs_pso]) kwargs_mcmc = { 'sigma_scale': 1, 'n_burn': 1, 'n_run': 1, 'n_walkers': 10, 'sampler_type': 'EMCEE' } fitting_list.append(['MCMC', kwargs_mcmc]) kwargs_mcmc['re_use_samples'] = True kwargs_mcmc['init_samples'] = np.array([[np.random.normal(1, 0.001)] for i in range(100)]) fitting_list.append(['MCMC', kwargs_mcmc]) def custom_likelihood(kwargs_lens, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, kwargs_special=None, kwargs_extinction=None): theta_E = kwargs_lens[0]['theta_E'] return -(theta_E - 1.)**2 / 0.1**2 / 2 kwargs_likelihood = {'custom_logL_addition': custom_likelihood} kwargs_data_joint = {'multi_band_list': []} kwargs_model = {'lens_model_list': ['SIS']} kwargs_constraints = {} lens_param = [{ 'theta_E': 1, 'center_x': 0, 'center_y': 0 }], [{ 'theta_E': 0.1, 'center_x': 0.1, 'center_y': 0.1 }], [{ 'center_x': 0, 'center_y': 0 }], [{ 'theta_E': 0, 'center_x': -10, 'center_y': -10 }], [{ 'theta_E': 10, 'center_x': 10, 'center_y': 10 }] kwargs_params = {'lens_model': lens_param} fittingSequence = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params) args = fittingSequence.param_class.kwargs2args( kwargs_lens=[{ 'theta_E': 1, 'center_x': 0, 'center_y': 0 }]) kwargs_result = fittingSequence.param_class.args2kwargs(args) print(kwargs_result) print(args, 'test args') chain_list = fittingSequence.fit_sequence(fitting_list) kwargs_result = fittingSequence.best_fit(bijective=False) npt.assert_almost_equal(kwargs_result['kwargs_lens'][0]['theta_E'], 1, decimal=2)
} kwargs_likelihood = {'check_bounds': True, 'force_no_add_image': False, 'source_marg': False, 'image_position_uncertainty': 0.004, 'check_matched_source_position': True, 'source_position_tolerance': 0.001, 'time_delay_likelihood': True, } kwargs_numerics = {'supersampling_factor': 1} image_band = [kwargs_data, kwargs_psf, kwargs_numerics] multi_band_list = [image_band] kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'multi-linear', 'time_delays_measured': delta_t["delta_t"].to_numpy(), 'time_delays_uncertainties': delta_t["sigma"].to_numpy(),} from lenstronomy.Workflow.fitting_sequence import FittingSequence fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params) fitting_kwargs_list = [ ['PSO', {'sigma_scale': .1, 'n_particles': 200, 'n_iterations': 200}], ['MCMC', {'n_burn': 100, 'n_run': 100, 'walkerRatio': 10, 'sigma_scale': .1}] ] start_time = time.time() chain_list = fitting_seq.fit_sequence(fitting_kwargs_list) kwargs_result = fitting_seq.best_fit() end_time = time.time() print(end_time - start_time, 'total time needed for computation') print('============ CONGRATULATIONS, YOUR JOB WAS SUCCESSFUL ================ ')
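# Sketch of how the chain_list returned above can be unpacked; the ('EMCEE',
# samples, parameter names, ...) layout of the MCMC entry matches the unpacking
# used elsewhere in this document.
sampler_type, samples_mcmc, param_mcmc, _ = chain_list[-1]  # last entry is the MCMC run
print(sampler_type)  # 'EMCEE'
print(samples_mcmc.shape)  # [n_mcmc_eval, n_params]
print(param_mcmc)  # parameter names, in sampling order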
def main(): args = parse_args() test_cfg = TestConfig.from_file(args.test_config_file_path) train_val_cfg = TrainValConfig.from_file( test_cfg.train_val_config_file_path) # Set device and default data type device = torch.device(test_cfg.device_type) if device.type == 'cuda': torch.set_default_tensor_type('torch.cuda.FloatTensor') else: torch.set_default_tensor_type('torch.FloatTensor') seed_everything(test_cfg.global_seed) ############ # Data I/O # ############ test_data = TDLMCData(data_cfg=train_val_cfg.data, rung_i=args.rung_idx) master_truth = test_data.cosmo_df if test_cfg.data.lens_indices is None: if args.lens_indices_path is None: # Test on all n_test lenses in the test set n_test = test_cfg.data.n_test lens_range = range(n_test) else: # Test on the lens indices in a text file at the specified path lens_range = [] with open(args.lens_indices_path, "r") as f: for line in f: lens_range.append(int(line.strip())) n_test = len(lens_range) print("Performing H0 inference on {:d} specified lenses...".format( n_test)) else: if args.lens_indices_path is None: # Test on the lens indices specified in the test config file lens_range = test_cfg.data.lens_indices n_test = len(lens_range) print("Performing H0 inference on {:d} specified lenses...".format( n_test)) else: raise ValueError( "Specific lens indices were specified in both the test config file and the command-line argument." ) batch_size = max(lens_range) + 1 test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, drop_last=True) # Output directory into which the H0 histograms and H0 samples will be saved out_dir = test_cfg.out_dir if not os.path.exists(out_dir): os.makedirs(out_dir) print("Destination folder path: {:s}".format(out_dir)) else: raise OSError("Destination folder already exists.") ###################### # Load trained state # ###################### # Instantiate loss function, to append to the MCMC objective as the prior orig_Y_cols = train_val_cfg.data.Y_cols loss_fn = getattr(h0rton.losses, train_val_cfg.model.likelihood_class)( Y_dim=train_val_cfg.data.Y_dim, device=device) # Instantiate MCMC parameter penalty function params_to_remove = ['lens_light_R_sersic'] #, 'src_light_R_sersic'] mcmc_Y_cols = [col for col in orig_Y_cols if col not in params_to_remove] mcmc_Y_dim = len(mcmc_Y_cols) mcmc_loss_fn = getattr( h0rton.losses, train_val_cfg.model.likelihood_class)( Y_dim=train_val_cfg.data.Y_dim - len(params_to_remove), device=device) remove_param_idx, remove_idx = mcmc_utils.get_idx_for_params( mcmc_loss_fn.out_dim, orig_Y_cols, params_to_remove, train_val_cfg.model.likelihood_class) mcmc_train_Y_mean = np.delete(train_val_cfg.data.train_Y_mean, remove_param_idx) mcmc_train_Y_std = np.delete(train_val_cfg.data.train_Y_std, remove_param_idx) parameter_penalty = mcmc_utils.HybridBNNPenalty( mcmc_Y_cols, train_val_cfg.model.likelihood_class, mcmc_train_Y_mean, mcmc_train_Y_std, test_cfg.h0_posterior.exclude_velocity_dispersion, device) custom_logL_addition = parameter_penalty.evaluate if test_cfg.lens_posterior_type.startswith( 'default') else None null_spread = True if test_cfg.lens_posterior_type == 'truth' else False # Instantiate model net = getattr( h0rton.models, train_val_cfg.model.architecture)(num_classes=loss_fn.out_dim) net.to(device) # Load trained weights from saved state net, epoch = train_utils.load_state_dict_test(test_cfg.state_dict_path, net, train_val_cfg.optim.n_epochs, device) with torch.no_grad(): net.eval() for X_ in test_loader: X = X_.to(device) pred = net(X) break mcmc_pred = 
pred.cpu().numpy() mcmc_pred = mcmc_utils.remove_parameters_from_pred(mcmc_pred, remove_idx, return_as_tensor=False) # Instantiate posterior for BNN samples, to initialize the walkers bnn_post = getattr(h0rton.h0_inference.gaussian_bnn_posterior, loss_fn.posterior_name)(mcmc_Y_dim, device, mcmc_train_Y_mean, mcmc_train_Y_std) bnn_post.set_sliced_pred(torch.tensor(mcmc_pred)) n_walkers = test_cfg.numerics.mcmc.walkerRatio * ( mcmc_Y_dim + 1) # BNN params + H0 times walker ratio init_pos = bnn_post.sample( n_walkers, sample_seed=test_cfg.global_seed ) # [batch_size, n_walkers, mcmc_Y_dim] contains just the lens model params, no D_dt init_D_dt = np.random.uniform(0.0, 10000.0, size=(batch_size, n_walkers, 1)) # FIXME: init H0 hardcoded kwargs_model = dict(lens_model_list=['PEMD', 'SHEAR'], point_source_model_list=['SOURCE_POSITION'], source_light_model_list=['SERSIC_ELLIPSE']) astro_sig = test_cfg.image_position_likelihood.sigma # Get H0 samples for each system if not test_cfg.time_delay_likelihood.baobab_time_delays: if 'abcd_ordering_i' not in master_truth: raise ValueError( "If the time delay measurements were not generated using Baobab, the user must specify the order of image positions in which the time delays are listed, in order of increasing dec." ) lenses_skipped = [] # keeps track of lenses that skipped MCMC total_progress = tqdm(total=n_test) # For each lens system... for i, lens_i in enumerate(lens_range): # Each lens gets a unique random state for td and vd measurement error realizations. rs_lens = np.random.RandomState(lens_i) ########################### # Relevant data and prior # ########################### data_i = master_truth.iloc[lens_i].copy() parameter_penalty.set_bnn_post_params( mcmc_pred[lens_i, :]) # set the BNN parameters # Init values for the lens model params if test_cfg.lens_posterior_type == 'default': init_info = dict( zip( mcmc_Y_cols, mcmc_pred[lens_i, :len(mcmc_Y_cols)] * mcmc_train_Y_std + mcmc_train_Y_mean)) # mean of primary Gaussian else: # types 'hybrid_with_truth_mean' and 'truth' init_info = dict(zip(mcmc_Y_cols, data_i[mcmc_Y_cols].values)) # truth params if not test_cfg.h0_posterior.exclude_velocity_dispersion: parameter_penalty.set_vel_disp_params() raise NotImplementedError lcdm = LCDM(z_lens=data_i['z_lens'], z_source=data_i['z_src'], flat=True) # Data is BCD - A with a certain ABCD ordering, so inferred time delays should follow this convention. 
measured_td_wrt0 = np.array(data_i['measured_td']) # [n_img - 1,] measured_td_sig = np.array(data_i['measured_td_err']) # [n_img - 1,] abcd_ordering_i = np.array(data_i['abcd_ordering_i']) n_img = len(abcd_ordering_i) kwargs_data_joint = dict( time_delays_measured=measured_td_wrt0, time_delays_uncertainties=measured_td_sig, abcd_ordering_i=abcd_ordering_i, #vel_disp_measured=measured_vd, # TODO: optionally exclude #vel_disp_uncertainty=vel_disp_sig, ) if not test_cfg.h0_posterior.exclude_velocity_dispersion: measured_vd = data_i['true_vd'] * ( 1.0 + rs_lens.randn() * test_cfg.error_model.velocity_dispersion_frac_error) kwargs_data_joint['vel_disp_measured'] = measured_vd kwargs_data_joint[ 'vel_disp_sig'] = test_cfg.velocity_dispersion_likelihood.sigma ############################# # Parameter init and bounds # ############################# lens_kwargs = mcmc_utils.get_lens_kwargs(init_info, null_spread=null_spread) ps_kwargs = mcmc_utils.get_ps_kwargs_src_plane(init_info, astro_sig, null_spread=null_spread) src_light_kwargs = mcmc_utils.get_light_kwargs( init_info['src_light_R_sersic'], null_spread=null_spread) special_kwargs = mcmc_utils.get_special_kwargs( n_img, astro_sig, null_spread=null_spread ) # image position offset and time delay distance, aka the "special" parameters kwargs_params = { 'lens_model': lens_kwargs, 'point_source_model': ps_kwargs, 'source_model': src_light_kwargs, 'special': special_kwargs, } if test_cfg.numerics.solver_type == 'NONE': solver_type = 'NONE' else: solver_type = 'PROFILE_SHEAR' if n_img == 4 else 'CENTER' #solver_type = 'NONE' kwargs_constraints = { 'num_point_source_list': [n_img], 'Ddt_sampling': True, 'solver_type': solver_type, } kwargs_likelihood = { 'time_delay_likelihood': True, 'sort_images_by_dec': True, 'prior_lens': [], 'prior_special': [], 'check_bounds': True, 'check_matched_source_position': False, 'source_position_tolerance': 0.01, 'source_position_sigma': 0.01, 'source_position_likelihood': False, 'custom_logL_addition': custom_logL_addition, } ########################### # MCMC posterior sampling # ########################### fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, verbose=False, mpi=False) if i == 0: param_class = fitting_seq._updateManager.param_class n_params, param_class_Y_cols = param_class.num_param() init_pos = mcmc_utils.reorder_to_param_class( mcmc_Y_cols, param_class_Y_cols, init_pos, init_D_dt) # MCMC sample from the post-processed BNN posterior jointly with cosmology lens_i_start_time = time.time() if test_cfg.lens_posterior_type == 'default': test_cfg.numerics.mcmc.update(init_samples=init_pos[lens_i, :, :]) fitting_kwargs_list_mcmc = [['MCMC', test_cfg.numerics.mcmc]] #with HiddenPrints(): try: chain_list_mcmc = fitting_seq.fit_sequence( fitting_kwargs_list_mcmc) kwargs_result_mcmc = fitting_seq.best_fit() except Exception: # skip lenses whose sampling fails print("lens {:d} skipped".format(lens_i)) total_progress.update(1) lenses_skipped.append(lens_i) continue lens_i_end_time = time.time() inference_time = (lens_i_end_time - lens_i_start_time) / 60.0 # min ############################# # Plotting the MCMC samples # ############################# # sampler_type : 'EMCEE' # samples_mcmc : np.array of shape `[n_mcmc_eval, n_params]` # param_mcmc : list of str of length n_params, the parameter names sampler_type, samples_mcmc, param_mcmc, _ = chain_list_mcmc[0] new_samples_mcmc = mcmc_utils.postprocess_mcmc_chain( kwargs_result_mcmc, samples_mcmc, kwargs_model, lens_kwargs[2], ps_kwargs[2],
src_light_kwargs[2], special_kwargs[2], kwargs_constraints) # Plot D_dt histogram D_dt_samples = new_samples_mcmc['D_dt'].values true_D_dt = lcdm.D_dt(H_0=data_i['H0'], Om0=0.27) data_i['D_dt'] = true_D_dt # Export D_dt samples for this lens lens_inference_dict = dict( D_dt_samples=D_dt_samples, # kappa_ext=0 for these samples inference_time=inference_time, true_D_dt=true_D_dt, ) lens_inference_dict_save_path = os.path.join( out_dir, 'D_dt_dict_{0:04d}.npy'.format(lens_i)) np.save(lens_inference_dict_save_path, lens_inference_dict) # Optionally export the MCMC samples if test_cfg.export.mcmc_samples: mcmc_samples_path = os.path.join( out_dir, 'mcmc_samples_{0:04d}.csv'.format(lens_i)) new_samples_mcmc.to_csv(mcmc_samples_path, index=None) # Optionally export the D_dt histogram if test_cfg.export.D_dt_histogram: cleaned_D_dt_samples = h0_utils.remove_outliers_from_lognormal( D_dt_samples, 3) _ = plotting_utils.plot_D_dt_histogram(cleaned_D_dt_samples, lens_i, true_D_dt, save_dir=out_dir) # Optionally export the plot of MCMC chain if test_cfg.export.mcmc_chain: mcmc_chain_path = os.path.join( out_dir, 'mcmc_chain_{0:04d}.png'.format(lens_i)) plotting_utils.plot_mcmc_chain(chain_list_mcmc, mcmc_chain_path) # Optionally export posterior cornerplot of select lens model parameters with D_dt if test_cfg.export.mcmc_corner: mcmc_corner_path = os.path.join( out_dir, 'mcmc_corner_{0:04d}.png'.format(lens_i)) plotting_utils.plot_mcmc_corner( new_samples_mcmc[test_cfg.export.mcmc_cols], None, test_cfg.export.mcmc_col_labels, mcmc_corner_path) total_progress.update(1) total_progress.close()
class ClsrWorkflow(object): def __init__(self, kwargs_data_joint, kwargs_model, lens_params, source_params, lenslight_params=None, kwargs_constraints=None, kwargs_likelihood=None): """ Class to manage cluster source reconstruction. This class wraps (rather than inherits from) the FittingSequence class in the Workflow module of lenstronomy. :param kwargs_data_joint: keyword arguments of [data, psf, numerics] in lenstronomy convention. :param kwargs_model: name of model list :param lens_params: lens model keyword arguments [kwargs_lens_init, kwargs_lens_sigma, kwargs_fixed_lens, kwargs_lower_lens, kwargs_upper_lens] :param source_params: source model keyword arguments [kwargs_source_init, kwargs_source_sigma, kwargs_fixed_source, kwargs_lower_source, kwargs_upper_source] :param kwargs_constraints: constraints on models :param kwargs_likelihood: options for calculating the likelihood, see more: LikelihoodModule class in Sampling module of lenstronomy. """ self.kwargs_data_joint = kwargs_data_joint self.multi_band_list = kwargs_data_joint.get('multi_band_list', []) self.kwargs_model = kwargs_model kwargs_params = {'lens_model': lens_params, 'source_model': source_params, 'lens_light_model': lenslight_params} self.kwargs_params = kwargs_params if kwargs_constraints is None: kwargs_constraints = {} if kwargs_likelihood is None: kwargs_likelihood = {'source_marg': False, 'check_positive_flux': True} self.fitting_seq_src = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params) def run_fit_sequence(self, fitting_kwargs_list): """ :param fitting_kwargs_list: list of [['string', {kwargs}], ..] with 'string' being the specific fitting option and kwargs being the arguments passed to this option :return: BIC value, fitting chains, and best-fit results """ chain_list = self.fitting_seq_src.fit_sequence(fitting_kwargs_list) kwargs_result = self.fitting_seq_src.best_fit(bijective=False) bic_model = self.fitting_seq_src.bic return bic_model, chain_list, kwargs_result def lensmodel_comp(self, num_img, n_particles, n_iterations, sigma_scale, num_lens_model, fixed_index=0, flexion_option=True): """ function to figure out the necessity of increasing lens model complexity. Currently, we only consider terms up to flexion.
:param n_particles: number of PSO particles :param n_iterations: number of PSO iterations :param sigma_scale: sigma scale for PSO :param num_img: int, number of lensed images :param fixed_index: int, index of the fixed lensed image :param num_lens_model: number of strings contained in the lens model list :param flexion_option: bool, if True (default) flexion is taken into consideration :return: pso results, fitting results of the necessary lens model complexity, bic values """ lens_remove_fixed_list = [] lens_add_fixed_list = [] for i in range(num_img): if i == fixed_index: print("lens model kept fixed in frame:", i + 1) else: lens_flexion_index = (i + 1) * num_lens_model - 1 lens_remove_fixed_list.append([lens_flexion_index, ['G1', 'G2', 'F1', 'F2'], [0, 0, 0, 0]]) G1_fix = self.kwargs_params['lens_model'][2][lens_flexion_index]['G1'] G2_fix = self.kwargs_params['lens_model'][2][lens_flexion_index]['G2'] F1_fix = self.kwargs_params['lens_model'][2][lens_flexion_index]['F1'] F2_fix = self.kwargs_params['lens_model'][2][lens_flexion_index]['F2'] lens_add_fixed_list.append([lens_flexion_index, ['G1', 'G2', 'F1', 'F2'], [G1_fix, G2_fix, F1_fix, F2_fix]]) flexion_add_fixed = [['update_settings', {'lens_add_fixed': lens_add_fixed_list}]] print("flexion_fixed:", flexion_add_fixed) kwargs_pso = [['PSO', {'sigma_scale': sigma_scale, 'n_particles': n_particles, 'n_iterations': n_iterations}]] fitting_kwargs_fix = flexion_add_fixed + kwargs_pso bic_model_fix, chain_list_fix, kwargs_result_fix = self.run_fit_sequence(fitting_kwargs_fix) if flexion_option: flexion_remove_fixed = [['update_settings', {'lens_remove_fixed': lens_remove_fixed_list}]] print("flexion_remove_fixed:", flexion_remove_fixed) fitting_kwargs_free = flexion_remove_fixed + kwargs_pso bic_model_free, chain_list_free, kwargs_result_free = self.run_fit_sequence(fitting_kwargs_free) else: bic_model_free = np.inf # no flexion run, so treat its BIC as infinitely bad if bic_model_free > bic_model_fix: print("No need to add flexion!") bic_list = [bic_model_fix] chain_list = [chain_list_fix] kwargs_result_list = [kwargs_result_fix] self._update_kwargs(kwargs_result_fix) _, _, _ = self.run_fit_sequence(flexion_add_fixed) else: print("Flexion is needed!") bic_list = [bic_model_fix, bic_model_free] # keep results from both runs so these lists stay aligned with bic_list chain_list = [chain_list_fix, chain_list_free] kwargs_result_list = [kwargs_result_fix, kwargs_result_free] return chain_list, kwargs_result_list, bic_list def sourcemodel_comp(self, n_max_range=[0], sr=0, n_particles=10, n_iterations=10, sigma_scale=1.0, bic_model_in=[100000], chain_list_in=[0], kwargs_results_in=[0], bic_option=True): """ function to find the best-fitting results among source models with different numbers of shapelet basis functions :param n_max_range: shapelet basis selection range :param sr: typical scale (") in source plane :param n_particles: number of PSO particles :param n_iterations: number of PSO iterations :param sigma_scale: sigma scale for PSO :param bic_model_in: BIC value of models before adding shapelets to source model :param chain_list_in: PSO chain results of models before adding shapelets to source model :param kwargs_results_in: fitting results of models before adding shapelets to source model :return: best-fit PSO results, best-fit results, PSO results for n_max_range, fitting results for n_max_range, bic values for all models """ bic_model_list = bic_model_in chain_list_list = chain_list_in kwargs_result_list = kwargs_results_in bic_in_len = len(bic_model_in) bic_run = True beta0 = sr kwargs_pso = [['PSO', {'sigma_scale': sigma_scale, 'n_particles': n_particles, 'n_iterations':
n_iterations}]] for nmax in n_max_range: if nmax < 0: raise ValueError("nmax cannot be negative!", nmax) else: if nmax == n_max_range[0]: start_kwargs_shapelet = [['update_settings', {'source_remove_fixed': [ [1, ['beta'], [beta0]] ]}]] else: start_kwargs_shapelet = [] beta_nmax = (nmax + 1) ** 0.5 * beta0 fit_kwargs_shapelet = [['update_settings', {'source_add_fixed': [[1, ['n_max'], [nmax]]], 'change_source_lower_limit': [[1, ['beta'], [beta_nmax]]] } ]] fitting_kwargs = start_kwargs_shapelet + fit_kwargs_shapelet + kwargs_pso if bic_run: print("nmax", nmax, "fitting_kwargs", fitting_kwargs) bic_model, chain_list, kwargs_result = self.run_fit_sequence(fitting_kwargs) if bic_model > bic_model_list[-1]: if bic_option: bic_run = False if bic_model > bic_model_in[bic_in_len - 1]: print("bic_model_in", bic_model_in) print("no need to add SHAPELETS!") fix_kwargs_shapelet = [['update_settings', {'source_add_fixed': [[1, ['beta'], [sr]]]}]] _, _, _ = self.run_fit_sequence(fix_kwargs_shapelet) elif not bic_option: chain_list_list.append(chain_list) kwargs_result_list.append(kwargs_result) bic_model_list.append(bic_model) print("no need to increase model complexity!") elif bic_model < bic_model_list[-1]: chain_list_list.append(chain_list) kwargs_result_list.append(kwargs_result) bic_model_list.append(bic_model) print(bic_model, "is currently the lowest BIC value in bic_model_list =", bic_model_list) bic_sourcemodel = bic_model_list[bic_in_len:] if bic_sourcemodel == []: chain_list_lowest = chain_list_in[-1] kwargs_result_lowest = kwargs_results_in[-1] else: index_bic_minima = int(np.argmin(bic_model_list)) # index of the lowest BIC chain_list_lowest = chain_list_list[index_bic_minima] kwargs_result_lowest = kwargs_result_list[index_bic_minima] return chain_list_lowest, kwargs_result_lowest, chain_list_list, kwargs_result_list, bic_model_list def _update_kwargs(self, kwargs_result): """ :param kwargs_result: fitting results of a specific state :return: None; the fitting sequence is restored to that state """ self.fitting_seq_src.update_state(kwargs_result)
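# A minimal usage sketch for ClsrWorkflow, assuming kwargs_data_joint, kwargs_model,
# lens_params and source_params were built in the lenstronomy conventions described
# in __init__; the PSO settings and n_max_range below are illustrative only.
workflow = ClsrWorkflow(kwargs_data_joint, kwargs_model, lens_params, source_params)
bic_model, chain_list, kwargs_result = workflow.run_fit_sequence(
    [['PSO', {'sigma_scale': 1.0, 'n_particles': 100, 'n_iterations': 100}]])
# Search over shapelet complexity, starting from the smooth-source fit above:
outputs = workflow.sourcemodel_comp(n_max_range=[0, 2, 4], sr=0.1,
                                    bic_model_in=[bic_model],
                                    chain_list_in=[chain_list],
                                    kwargs_results_in=[kwargs_result])
chain_list_lowest, kwargs_result_lowest, chain_list_list, kwargs_result_list, bic_model_list = outputs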
def swim(self, lens_name, model_id, log=True, mpi=False, recipe_name='default', sampler='EMCEE', thread_count=1): """ Run models for a single lens. :param lens_name: lens name :type lens_name: `str` :param model_id: identifier for the model run :type model_id: `str` :param log: if `True`, all `print` statements will be logged :type log: `bool` :param mpi: MPI option :type mpi: `bool` :param recipe_name: recipe for pre-sampling optimization, supported ones now: 'default' and 'galaxy-galaxy' :type recipe_name: `str` :param sampler: 'EMCEE' or 'COSMOHAMMER', cosmohammer is kept for legacy :type sampler: `str` :param thread_count: number of threads if `multiprocess` is used :type thread_count: `int` :return: :rtype: """ pool = choose_pool(mpi=mpi) if log and pool.is_master(): log_file = open(self.file_system.get_log_file_path(lens_name, model_id), 'wt') sys.stdout = log_file config = self.get_lens_config(lens_name) recipe = Recipe(config, sampler=sampler, thread_count=thread_count) psf_supersampling_factor = config.get_psf_supersampled_factor() kwargs_data_joint = self.get_kwargs_data_joint( lens_name, psf_supersampled_factor=psf_supersampling_factor) fitting_sequence = FittingSequence( kwargs_data_joint, config.get_kwargs_model(), config.get_kwargs_constraints(), config.get_kwargs_likelihood(), config.get_kwargs_params(), mpi=mpi ) fitting_kwargs_list = recipe.get_recipe( kwargs_data_joint=kwargs_data_joint, recipe_name=recipe_name) fit_output = fitting_sequence.fit_sequence(fitting_kwargs_list) kwargs_result = fitting_sequence.best_fit(bijective=False) output = { 'settings': config.settings, 'kwargs_result': kwargs_result, 'fit_output': fit_output, } if pool.is_master(): self.file_system.save_output(lens_name, model_id, output) if log and pool.is_master(): log_file.close()
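# A hypothetical call of the swim() method above; `pipeline` stands in for an
# instance of the (unnamed here) class that owns it, and the lens and model names
# are placeholders.
pipeline.swim('lens_system_1', model_id='run_1', log=True, mpi=False,
              recipe_name='galaxy-galaxy', sampler='EMCEE', thread_count=4)
# For mpi=True the script would typically be launched through mpirun/mpiexec so
# that choose_pool can distribute the work across processes.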
kwargs_lens_light_init = [] kwargs_lens_light_sigma = [] kwargs_lower_lens_light = [] kwargs_upper_lens_light = [] # first Sersic component fixed_lens_light.append({}) kwargs_lens_light_init.append({'R_sersic': .1, 'n_sersic': 4, 'e1': 0, 'e2': 0, 'center_x': 0, 'center_y': 0}) kwargs_lens_light_sigma.append({'n_sersic': 0.5, 'R_sersic': 0.2, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1}) kwargs_lower_lens_light.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -10, 'center_y': -10}) kwargs_upper_lens_light.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 10, 'n_sersic': 8, 'center_x': 10, 'center_y': 10}) lens_light_params = [kwargs_lens_light_init, kwargs_lens_light_sigma, fixed_lens_light, kwargs_lower_lens_light, kwargs_upper_lens_light] kwargs_params = {'lens_light_model': lens_light_params} fitting_seq = FittingSequence(multi_band_list, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params) # n_particles is the number of PSO particles used to explore the parameter space, so with 5 particles the fit is evaluated at 5 positions per iteration # more particles means the fit is tested in more places, so the result is less likely to converge to a local minimum # but more particles is also more time consuming # n_iterations is the number of PSO iterations and there is no trivial way to know how many are necessary for the fit to converge fitting_kwargs_list = [{'fitting_routine': 'PSO', 'mpi': False, 'sigma_scale': 1., 'n_particles': 100, 'n_iterations': 90}] lens_result, source_result, lens_light_result, ps_result, cosmo_result, chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list) lensPlot = LensModelPlot(kwargs_data_mask, kwargs_psf, kwargs_numerics, kwargs_model, lens_result, source_result, lens_light_result, ps_result, arrow_size=0.02, cmap_string="gist_heat") # this contains the best-fit parameters from fitting the light of the galaxy (light profile, ellipticity and position) LLR = lens_light_result[0]
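# Note: the snippet above uses a legacy lenstronomy interface, with a dict-based
# fitting routine ({'fitting_routine': 'PSO', ...}) and a 10-tuple return from
# fit_sequence. In the list-based interface used by the other snippets in this
# document, the equivalent PSO step would be sketched as:
fitting_kwargs_list = [['PSO', {'sigma_scale': 1., 'n_particles': 100, 'n_iterations': 90}]]
chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)
kwargs_result = fitting_seq.best_fit()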
'n_sersic': 6, 'center_x': (x0blob + 4) * deltaPix, 'center_y': ((60 - y0blob) + 4) * deltaPix }) lens_light_params = [ kwargs_lens_light_init, kwargs_lens_light_sigma, fixed_lens_light, kwargs_lower_lens_light, kwargs_upper_lens_light ] kwargs_params = {'lens_light_model': lens_light_params} from lenstronomy.Workflow.fitting_sequence import FittingSequence fitting_seqI = FittingSequence(multi_band_listI, kwargs_modelI, kwargs_constraints, kwargs_likelihood, kwargs_params) fitting_kwargs_list = [[ 'MCMC', { 'n_burn': 100, 'n_run': 100, 'walkerRatio': 10, 'sigma_scale': .1 } ]] chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seqI.fit_sequence( fitting_kwargs_list) lens_result, source_result, lens_light_resultI, ps_result, cosmo_result = fitting_seqI.best_fit()
def main(): args = parse_args() test_cfg = TestConfig.from_file(args.test_config_file_path) train_val_cfg = TrainValConfig.from_file(test_cfg.train_val_config_file_path) baobab_cfg = get_baobab_config(test_cfg.data.test_dir) # Set device and default data type device = torch.device(test_cfg.device_type) if device.type == 'cuda': torch.set_default_tensor_type('torch.cuda.FloatTensor') else: torch.set_default_tensor_type('torch.FloatTensor') seed_everything(test_cfg.global_seed) ############ # Data I/O # ############ test_data = XYCosmoData(test_cfg.data.test_dir, data_cfg=train_val_cfg.data) master_truth = test_data.cosmo_df master_truth = metadata_utils.add_qphi_columns(master_truth) master_truth = metadata_utils.add_gamma_psi_ext_columns(master_truth) if test_cfg.data.lens_indices is None: if args.lens_indices_path is None: # Test on all n_test lenses in the test set n_test = test_cfg.data.n_test lens_range = range(n_test) else: # Test on the lens indices in a text file at the specified path lens_range = [] with open(args.lens_indices_path, "r") as f: for line in f: lens_range.append(int(line.strip())) n_test = len(lens_range) print("Performing H0 inference on {:d} specified lenses...".format(n_test)) else: if args.lens_indices_path is None: # Test on the lens indices specified in the test config file lens_range = test_cfg.data.lens_indices n_test = len(lens_range) print("Performing H0 inference on {:d} specified lenses...".format(n_test)) else: raise ValueError("Specific lens indices were specified in both the test config file and the command-line argument.") batch_size = max(lens_range) + 1 # Output directory into which the H0 histograms and H0 samples will be saved out_dir = test_cfg.out_dir if not os.path.exists(out_dir): os.makedirs(out_dir) print("Destination folder path: {:s}".format(out_dir)) else: raise OSError("Destination folder already exists.") ###################### # Parameter handling # ###################### # Y columns the BNN was trained on orig_Y_cols = train_val_cfg.data.Y_cols # Columns to be sampled via MCMC (no BNN penalty is used in this truth-sampling script) params_to_remove = ['lens_light_R_sersic'] #'src_light_R_sersic'] mcmc_Y_cols = [col for col in orig_Y_cols if col not in params_to_remove] mcmc_Y_dim = len(mcmc_Y_cols) null_spread = True #init_D_dt = np.random.uniform(0.0, 10000.0, size=(batch_size, n_walkers, 1)) # FIXME: init H0 hardcoded kwargs_model = dict(lens_model_list=['PEMD', 'SHEAR'], point_source_model_list=['SOURCE_POSITION'], source_light_model_list=['SERSIC_ELLIPSE']) astro_sig = test_cfg.image_position_likelihood.sigma # Get H0 samples for each system if not test_cfg.time_delay_likelihood.baobab_time_delays: if 'abcd_ordering_i' not in master_truth: raise ValueError("If the time delay measurements were not generated using Baobab, the user must specify the order of image positions in which the time delays are listed, in order of increasing dec.") kwargs_lens_eq_solver = {'min_distance': 0.05, 'search_window': baobab_cfg.instrument.pixel_scale*baobab_cfg.image.num_pix, 'num_iter_max': 100} #n_walkers = test_cfg.numerics.mcmc.walkerRatio*(mcmc_Y_dim + 1) # BNN params + H0 times walker ratio #init_pos = np.tile(master_truth[mcmc_Y_cols].iloc[:batch_size].values[:, np.newaxis, :], [1, n_walkers, 1]) #init_D_dt = np.random.uniform(0.0, 10000.0, size=(batch_size, n_walkers, 1)) #print(init_pos.shape, init_D_dt.shape) total_progress = tqdm(total=n_test) # For each lens system...
for i, lens_i in enumerate(lens_range): # Each lens gets a unique random state for td and vd measurement error realizations. rs_lens = np.random.RandomState(lens_i) ########################### # Relevant data and prior # ########################### data_i = master_truth.iloc[lens_i].copy() # Init values for the lens model params init_info = dict(zip(mcmc_Y_cols, data_i[mcmc_Y_cols].values)) # truth params lcdm = LCDM(z_lens=data_i['z_lens'], z_source=data_i['z_src'], flat=True) true_img_dec = np.array(literal_eval(data_i['y_image'])) n_img = len(true_img_dec) true_td = np.array(literal_eval(data_i['true_td'])) measured_td = true_td + rs_lens.randn(*true_td.shape)*test_cfg.error_model.time_delay_error measured_td_sig = test_cfg.time_delay_likelihood.sigma # np.ones(n_img - 1)* measured_img_dec = true_img_dec + rs_lens.randn(n_img)*astro_sig increasing_dec_i = np.argsort(true_img_dec) #np.argsort(measured_img_dec) measured_td = h0_utils.reorder_to_tdlmc(measured_td, increasing_dec_i, range(n_img)) # need to use measured dec to order measured_img_dec = h0_utils.reorder_to_tdlmc(measured_img_dec, increasing_dec_i, range(n_img)) measured_td_wrt0 = measured_td[1:] - measured_td[0] kwargs_data_joint = dict(time_delays_measured=measured_td_wrt0, time_delays_uncertainties=measured_td_sig, ) ############################# # Parameter init and bounds # ############################# lens_kwargs = mcmc_utils.get_lens_kwargs(init_info, null_spread=null_spread) ps_kwargs = mcmc_utils.get_ps_kwargs_src_plane(init_info, astro_sig, null_spread=null_spread) src_light_kwargs = mcmc_utils.get_light_kwargs(init_info['src_light_R_sersic'], null_spread=null_spread) special_kwargs = mcmc_utils.get_special_kwargs(n_img, astro_sig, D_dt_sigma=2000, null_spread=null_spread) # image position offset and time delay distance, aka the "special" parameters kwargs_params = {'lens_model': lens_kwargs, 'point_source_model': ps_kwargs, 'source_model': src_light_kwargs, 'special': special_kwargs,} if test_cfg.numerics.solver_type == 'NONE': solver_type = 'NONE' else: solver_type = 'PROFILE_SHEAR' if n_img == 4 else 'CENTER' #solver_type = 'NONE' kwargs_constraints = {'num_point_source_list': [n_img], 'Ddt_sampling': True, 'solver_type': solver_type,} kwargs_likelihood = {'time_delay_likelihood': True, 'sort_images_by_dec': True, 'prior_lens': [], 'prior_special': [], 'check_bounds': True, 'check_matched_source_position': False, 'source_position_tolerance': 0.01, 'source_position_sigma': 0.01, 'source_position_likelihood': False, 'custom_logL_addition': None, 'kwargs_lens_eq_solver': kwargs_lens_eq_solver} ########################### # MCMC posterior sampling # ########################### fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, verbose=False, mpi=False) if i == 0: param_class = fitting_seq._updateManager.param_class n_params, param_class_Y_cols = param_class.num_param() #init_pos = mcmc_utils.reorder_to_param_class(mcmc_Y_cols, param_class_Y_cols, init_pos, init_D_dt) # MCMC sample from the post-processed BNN posterior jointly with cosmology lens_i_start_time = time.time() #test_cfg.numerics.mcmc.update(init_samples=init_pos[lens_i, :, :]) fitting_kwargs_list_mcmc = [['MCMC', test_cfg.numerics.mcmc]] #with HiddenPrints(): #try: chain_list_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list_mcmc) kwargs_result_mcmc = fitting_seq.best_fit() #except: # print("lens {:d} skipped".format(lens_i)) # total_progress.update(1) # continue lens_i_end_time = 
time.time() inference_time = (lens_i_end_time - lens_i_start_time)/60.0 # min ############################# # Plotting the MCMC samples # ############################# # sampler_type : 'EMCEE' # samples_mcmc : np.array of shape `[n_mcmc_eval, n_params]` # param_mcmc : list of str of length n_params, the parameter names sampler_type, samples_mcmc, param_mcmc, _ = chain_list_mcmc[0] new_samples_mcmc = mcmc_utils.postprocess_mcmc_chain(kwargs_result_mcmc, samples_mcmc, kwargs_model, lens_kwargs[2], ps_kwargs[2], src_light_kwargs[2], special_kwargs[2], kwargs_constraints) # Plot D_dt histogram D_dt_samples = new_samples_mcmc['D_dt'].values true_D_dt = lcdm.D_dt(H_0=data_i['H0'], Om0=0.3) data_i['D_dt'] = true_D_dt # Export D_dt samples for this lens lens_inference_dict = dict( D_dt_samples=D_dt_samples, # kappa_ext=0 for these samples inference_time=inference_time, true_D_dt=true_D_dt, ) lens_inference_dict_save_path = os.path.join(out_dir, 'D_dt_dict_{0:04d}.npy'.format(lens_i)) np.save(lens_inference_dict_save_path, lens_inference_dict) # Optionally export the MCMC samples if test_cfg.export.mcmc_samples: mcmc_samples_path = os.path.join(out_dir, 'mcmc_samples_{0:04d}.csv'.format(lens_i)) new_samples_mcmc.to_csv(mcmc_samples_path, index=None) # Optionally export the D_dt histogram if test_cfg.export.D_dt_histogram: cleaned_D_dt_samples = h0_utils.remove_outliers_from_lognormal(D_dt_samples, 3) _ = plotting_utils.plot_D_dt_histogram(cleaned_D_dt_samples, lens_i, true_D_dt, save_dir=out_dir) # Optionally export the plot of MCMC chain if test_cfg.export.mcmc_chain: mcmc_chain_path = os.path.join(out_dir, 'mcmc_chain_{0:04d}.png'.format(lens_i)) plotting_utils.plot_mcmc_chain(chain_list_mcmc, mcmc_chain_path) # Optionally export posterior cornerplot of select lens model parameters with D_dt if test_cfg.export.mcmc_corner: mcmc_corner_path = os.path.join(out_dir, 'mcmc_corner_{0:04d}.png'.format(lens_i)) plotting_utils.plot_mcmc_corner(new_samples_mcmc[test_cfg.export.mcmc_cols], data_i[test_cfg.export.mcmc_cols], test_cfg.export.mcmc_col_labels, mcmc_corner_path) total_progress.update(1) gc.collect() total_progress.close()