def __call__(self, observable_dict):
    """
    Compute the ensemble log-likelihood of simulated observables against
    the stored measurements.

    :param observable_dict: Simulations object
    :return: log-likelihood value
    """
    assert isinstance(observable_dict, Simulations)
    # check dict entries
    assert (observable_dict.keys() == self._measurement_dict.keys())

    def _gauss_lnlike(diff, cov):
        # Gaussian log-likelihood term
        #   -0.5 * (diff^T cov^-1 diff + log det(2*pi*cov))
        # shared by all covariance branches below (it was written out
        # three times in the original code)
        if cov.trace() < 1E-28:  # zero will not be reached, at most E-32
            # (near-)singular covariance: fall back to a unit-covariance chi^2
            return -float(0.5) * float(np.vdot(diff, diff))
        sign, logdet = np.linalg.slogdet(cov * 2. * np.pi)
        return -float(0.5) * float(
            np.vdot(diff, np.linalg.solve(cov, diff.T)) + sign * logdet)

    likelicache = float(0)
    for name in self._measurement_dict.keys():
        obs_mean, obs_cov = oas_mcov(observable_dict[name])
        data = deepcopy(self._measurement_dict[name].to_global_data())
        diff = np.nan_to_num(data - obs_mean)
        # the measurement covariance is optional (not all measurements have
        # one); when present it is added to the OAS-estimated observable
        # covariance, otherwise the observable covariance is used alone
        if self._covariance_dict is not None and name in self._covariance_dict.keys():
            cov = deepcopy(self._covariance_dict[name].to_global_data()) + obs_cov
        else:
            cov = obs_cov
        likelicache += _gauss_lnlike(diff, cov)
    return likelicache
def test_oas_mcov(self):
    """OAS estimator on an ensemble of identical realisations must
    reproduce the sample exactly and yield a zero covariance."""
    # mock observable ensemble: a single row, broadcast so every node
    # holds the same realisation
    sample = np.random.rand(1, 32)
    comm.Bcast(sample, root=0)
    expected_cov = np.zeros((32, 32))
    # identical realisations -> estimated covariance must vanish
    est_mean, local_cov = oas_mcov(sample)
    gathered_cov = np.vstack(comm.allgather(local_cov))
    for col in range(est_mean.shape[1]):
        self.assertAlmostEqual(est_mean[0][col], sample[0][col])
    for r in range(gathered_cov.shape[0]):
        for c in range(gathered_cov.shape[1]):
            self.assertAlmostEqual(expected_cov[r, c], gathered_cov[r, c])
def oas_estimator_timing(data_size):
    """
    Time the OAS mean/covariance estimator on a random MPI-distributed matrix.

    :param data_size: global (square) matrix dimension; each node holds the
                      row slice assigned to it by mpi_arrange
    """
    bounds = mpi_arrange(data_size)
    rows_here = bounds[1] - bounds[0]
    random_data = np.random.rand(rows_here, data_size)
    tmr = Timer()
    tmr.tick('oas_estimator')
    mean, local_cov = oas_mcov(random_data)
    tmr.tock('oas_estimator')
    # report from the root node only
    if not mpirank:
        print('@ tools_profiles::oas_estimator_timing with ' + str(mpisize) + ' nodes')
        print('global matrix size (' + str(data_size) + ',' + str(data_size) + ')')
        print('elapse time ' + str(tmr.record['oas_estimator']) + '\n')
def test_oas(self):
    """OAS estimator on an Observable built from identical realisations
    must return the realisation as mean and a zero covariance."""
    # mock observable
    arr_a = np.random.rand(1, 4)
    comm.Bcast(arr_a, root=0)
    arr_ens = np.zeros((3, 4))
    null_cov = np.zeros((4, 4))
    # ensemble with identical realisations
    for i in range(len(arr_ens)):
        arr_ens[i] = arr_a
    dtuple = DomainTuple.make((RGSpace(3 * mpisize), RGSpace(4)))
    obs = Observable(dtuple, arr_ens)
    test_mean, test_cov = oas_mcov(obs)
    # BUG FIX: the original loops ran over len(arr_a) == 1 (the leading
    # axis of the (1, 4) array), so only the first mean element and
    # test_cov[0][0] were ever checked; iterate over all entries instead.
    for i in range(arr_a.shape[1]):
        self.assertAlmostEqual(test_mean[0][i], arr_a[0][i])
    # iterate over test_cov's own (possibly node-local) shape so the check
    # stays valid when the covariance rows are distributed over nodes
    for i in range(test_cov.shape[0]):
        for j in range(test_cov.shape[1]):
            self.assertAlmostEqual(test_cov[i][j], null_cov[i][j])
def testfield(measure_size, simulation_size, make_plots=True, debug=False):
    # End-to-end toy run: build mock 1D "TestField" data, then recover the
    # field parameters (a, b) with the Dynesty pipeline.
    # Configure logging before anything else; DEBUG level only on request.
    if debug:
        log.basicConfig(filename='imagine_li_dynesty.log', level=log.DEBUG)
    else:
        log.basicConfig(filename='imagine_li_dynesty.log')
    # NOTE(review): leftover banner string, has no runtime effect
    """
    :return:
    log.basicConfig(filename='imagine.log', level=log.INFO)
    """
    """
    # step 0, set 'a' and 'b', 'mea_std'
    TestField in LiSimulator is modeled as
    field = gaussian_random(mean=a,std=b)_x * cos(x)
    where x in (0,2pi)
    for generating mock data we need
    true values of a and b: true_a, true_b, mea_seed
    measurement uncertainty: mea_std
    measurement points, positioned in (0,2pi) evenly, due to TestField modelling
    """
    true_a = 3.
    true_b = 6.
    mea_std = 0.1  # std of gaussian measurement error
    mea_seed = 233
    truths = [true_a, true_b]  # will be used in visualizing posterior
    """
    # step 1, prepare mock data
    """
    """
    # 1.1, generate measurements
    mea_field = signal_field + noise_field
    """
    x = np.linspace(0, 2. * np.pi, measure_size)  # data points in measurements
    np.random.seed(mea_seed)  # seed for signal field
    signal_field = np.multiply(
        np.cos(x),
        np.random.normal(loc=true_a, scale=true_b, size=measure_size))
    mea_field = np.vstack([
        signal_field +
        np.random.normal(loc=0., scale=mea_std, size=measure_size)
    ])
    """
    # 1.2, generate covariances
    what's the difference between pre-define dan re-estimated? 
    """
    # re-estimate according to measurement error
    mea_repeat = np.zeros((simulation_size, measure_size))
    for i in range(simulation_size):  # times of repeated measurements
        mea_repeat[i, :] = signal_field + np.random.normal(
            loc=0., scale=mea_std, size=measure_size)
    mea_cov = oas_mcov(mea_repeat)[1]
    print(mpirank, 're-estimated: \n', mea_cov, 'slogdet', mpi_slogdet(mea_cov))
    # pre-defined according to measurement error
    # NOTE(review): this intentionally overwrites the re-estimated covariance
    # above; the re-estimated one is only printed for comparison
    mea_cov = (mea_std**2) * mpi_eye(measure_size)
    print(mpirank, 'pre-defined: \n', mea_cov, 'slogdet', mpi_slogdet(mea_cov))
    """
    # 1.3 assemble in imagine convention
    """
    mock_data = Measurements()  # create empty Measurements object
    mock_cov = Covariances()  # create empty Covariances object
    # pick up a measurement
    mock_data.append(('test', 'nan', str(measure_size), 'nan'), mea_field, True)
    mock_cov.append(('test', 'nan', str(measure_size), 'nan'), mea_cov, True)
    """
    # 1.4, visualize mock data
    """
    if mpirank == 0 and make_plots:
        plt.plot(x, mock_data[('test', 'nan', str(measure_size), 'nan')].data[0])
        plt.savefig('testfield_mock_li.pdf')
    """
    # step 2, prepare pipeline and execute analysis
    """
    """
    # 2.1, ensemble likelihood
    """
    likelihood = EnsembleLikelihood(
        mock_data, mock_cov)  # initialize likelihood with measured info
    """
    # 2.2, field factory list
    """
    factory = TestFieldFactory(
        active_parameters=('a', 'b'))  # factory with active parameters a, b
    factory.parameter_ranges = {
        'a': (0, 10),
        'b': (0, 10)
    }  # adjust parameter range for Bayesian analysis
    factory_list = [factory]  # likelihood requires a list/tuple of factories
    """
    # 2.3, flat prior
    """
    prior = FlatPrior()
    """
    # 2.4, simulator
    """
    simer = LiSimulator(mock_data)
    """
    # 2.5, pipeline
    """
    pipe = DynestyPipeline(simer, factory_list, likelihood, prior,
                           simulation_size)
    pipe.random_type = 'controllable'  # 'fixed' random_type doesnt work for Dynesty pipeline, yet
    pipe.seed_tracer = int(23)
    pipe.sampling_controllers = {'nlive': 400}
    tmr = Timer()
    tmr.tick('test')
    results = pipe()  # run the sampler; this is the expensive step
    tmr.tock('test')
    if mpirank == 0:
        print('\n elapse time ' + str(tmr.record['test']) + '\n')
    """
    # step 3, visualize (with corner package)
    """
    if mpirank == 0 and make_plots:
        samples = results['samples']
        # samples come out in unit-cube variables; map each column back into
        # its physical parameter range before plotting
        for i in range(len(
                pipe.active_parameters)):  # convert variables into parameters
            low, high = pipe.active_ranges[pipe.active_parameters[i]]
            for j in range(samples.shape[0]):
                samples[j, i] = unity_mapper(samples[j, i], low, high)
        # corner plot
        corner.corner(samples[:, :len(pipe.active_parameters)],
                      range=[0.99] * len(pipe.active_parameters),
                      quantiles=[0.02, 0.5, 0.98],
                      labels=pipe.active_parameters,
                      show_titles=True,
                      title_kwargs={"fontsize": 15},
                      color='steelblue',
                      truths=truths,
                      truth_color='firebrick',
                      plot_contours=True,
                      hist_kwargs={'linewidth': 2},
                      label_kwargs={'fontsize': 20})
        plt.savefig('testfield_posterior_li_dynesty.pdf')
def __call__(self, observable_dict):
    """
    EnsembleLikelihood class call function

    Parameters
    ----------
    observable_dict : imagine.observables.observable_dict.Simulations
        Simulations object

    Returns
    ------
    likelicache : float
        log-likelihood value (copied to all nodes)
    """
    log.debug('@ ensemble_likelihood::__call__')
    assert isinstance(observable_dict, Simulations)
    # check dict entries
    assert (observable_dict.keys() == self._measurement_dict.keys())

    def _gauss_lnlike(diff, cov):
        # distributed Gaussian log-likelihood term
        #   -0.5 * (diff^T cov^-1 diff + log det(2*pi*cov))
        # shared by all covariance branches below (it was written out
        # three times in the original code)
        if (mpi_trace(cov) < 1E-28):  # zero will not be reached, at most E-32
            # (near-)singular covariance: fall back to a unit-covariance chi^2
            return -0.5 * np.vdot(diff, diff)
        sign, logdet = mpi_slogdet(cov * 2. * np.pi)
        return -0.5 * (np.vdot(diff, mpi_lu_solve(cov, diff)) + sign * logdet)

    likelicache = float(0)
    for name in self._measurement_dict.keys():
        obs_mean, obs_cov = oas_mcov(
            observable_dict[name].data)  # to distributed data
        data = deepcopy(
            self._measurement_dict[name].data)  # to distributed data
        diff = np.nan_to_num(data - obs_mean)
        # the measurement covariance is optional (not all measurements have
        # one); when present it is added to the OAS-estimated observable
        # covariance, otherwise the observable covariance is used alone
        if self._covariance_dict is not None and name in self._covariance_dict.keys():
            cov = deepcopy(self._covariance_dict[name].data) + obs_cov
        else:
            cov = obs_cov
        likelicache += _gauss_lnlike(diff, cov)
    return likelicache
def lsa_errprop():
    # Propagate theoretical parameter uncertainty through the Hammurabi
    # simulator and recover the LSA/CRE parameters with Dynesty.
    #log.basicConfig(filename='imagine.log', level=log.DEBUG)
    """
    only LSA regular magnetic field model in test, @ 23GHz
    Faraday rotation provided by YMW16 thermal electron model
    full LSA parameter set {b0, psi0, psi1, chi0}
    """
    # hammurabi parameter base file
    xmlpath = './params.xml'
    # ground-truth parameter values (b0/psi0/psi1/chi0 for BregLSA,
    # alpha/r0/z0 for CREAna)
    true_b0 = 6.0
    true_psi0 = 27.0
    true_psi1 = 0.9
    true_chi0 = 25.
    true_alpha = 3.0
    true_r0 = 5.0
    true_z0 = 1.0
    mea_nside = 2  # observable Nside
    mea_pix = 12 * mea_nside**2  # observable pixel number
    """
    # step 1, prepare mock data
    """
    x = np.zeros((1, mea_pix))  # only for triggering simulator
    trigger = Measurements()
    trigger.append(('sync', '23', str(mea_nside), 'I'), x)  # only I map
    # initialize simulator
    mocksize = 10  # ensemble of mock data (per node)
    error = 0.1  # theoretical relative uncertainty for each (active) parameter
    mocker = Hammurabi(measurements=trigger, xml_path=xmlpath)
    # prepare theoretical uncertainty: draw each parameter from a Gaussian
    # centred on its true value with 10% relative width
    b0_var = np.random.normal(true_b0, error * true_b0, mocksize)
    psi0_var = np.random.normal(true_psi0, error * true_psi0, mocksize)
    psi1_var = np.random.normal(true_psi1, error * true_psi1, mocksize)
    chi0_var = np.random.normal(true_chi0, error * true_chi0, mocksize)
    alpha_var = np.random.normal(true_alpha, error * true_alpha, mocksize)
    r0_var = np.random.normal(true_r0, error * true_r0, mocksize)
    z0_var = np.random.normal(true_z0, error * true_z0, mocksize)
    mock_ensemble = Simulations()
    # start simulation
    for i in range(mocksize):  # get one realization each time
        # BregLSA field
        paramlist = {
            'b0': b0_var[i],
            'psi0': psi0_var[i],
            'psi1': psi1_var[i],
            'chi0': chi0_var[i]
        }  # inactive parameters at default
        breg_lsa = BregLSA(paramlist, 1)
        # CREAna field
        paramlist = {
            'alpha': alpha_var[i],
            'beta': 0.0,
            'theta': 0.0,
            'r0': r0_var[i],
            'z0': z0_var[i],
            'E0': 20.6,
            'j0': 0.0217
        }  # inactive parameters at default
        cre_ana = CREAna(paramlist, 1)
        # TEregYMW16 field
        tereg_ymw16 = TEregYMW16(dict(), 1)
        # collect mock data and covariance
        outputs = mocker([breg_lsa, cre_ana, tereg_ymw16])
        mock_ensemble.append(('sync', '23', str(mea_nside), 'I'),
                             outputs[('sync', '23', str(mea_nside), 'I')])
    # collect mean and cov from simulated results
    mock_data = Measurements()
    mock_cov = Covariances()
    mean, cov = oas_mcov(mock_ensemble[('sync', '23', str(mea_nside),
                                        'I')].data)
    mock_data.append(('sync', '23', str(mea_nside), 'I'), mean)
    mock_cov.append(('sync', '23', str(mea_nside), 'I'), cov)
    """
    # step 2, prepare pipeline and execute analysis
    """
    likelihood = EnsembleLikelihood(mock_data, mock_cov)
    breg_factory = BregLSAFactory(active_parameters=('b0', 'psi0', 'psi1',
                                                     'chi0'))
    breg_factory.parameter_ranges = {
        'b0': (0., 10.),
        'psi0': (0., 50.),
        'psi1': (0., 2.),
        'chi0': (0., 50.)
    }
    cre_factory = CREAnaFactory(active_parameters=('alpha', 'r0', 'z0'))
    cre_factory.parameter_ranges = {
        'alpha': (1., 5.),
        'r0': (1., 10.),
        'z0': (0.1, 5.)
    }
    tereg_factory = TEregYMW16Factory()
    factory_list = [breg_factory, cre_factory, tereg_factory]
    prior = FlatPrior()
    simer = Hammurabi(measurements=mock_data, xml_path=xmlpath)
    ensemble_size = 10
    pipe = DynestyPipeline(simer, factory_list, likelihood, prior,
                           ensemble_size)
    pipe.random_type = 'free'
    pipe.sampling_controllers = {'nlive': 4000}
    tmr = Timer()
    tmr.tick('test')
    results = pipe()  # run the sampler; this is the expensive step
    tmr.tock('test')
    if not mpirank:
        print('\n elapse time ' + str(tmr.record['test']) + '\n')
    """
    # step 3, visualize (with corner package)
    """
    if mpirank == 0:
        samples = results['samples']
        np.savetxt('posterior_fullsky_regular_errprop.txt', samples)
    # NOTE(review): the triple-quote below opens a string whose closing lies
    # beyond this chunk of the file; kept byte-identical
    """