def test_without_simcov(self):
    """
    The ensemble likelihood must reduce to the simple likelihood when
    every ensemble member is an identical copy of one realisation
    (i.e. the simulation covariance vanishes).
    """
    sims = Simulations()
    meas = Measurements()
    covs = Covariances()
    tag = ('test', None, 4*mpisize, None)
    # mock measurements, broadcast so every rank holds the same data
    data_a = np.random.rand(1, 4*mpisize)
    comm.Bcast(data_a, root=0)
    meas.append(name=tag, data=Observable(data_a, 'measured'), otype='plain')
    # mock covariance
    data_c = np.random.rand(4, 4*mpisize)
    covs.append(name=tag, cov_data=Observable(data_c, 'covariance'))
    # mock observable: one realisation duplicated over the ensemble
    data_b = np.random.rand(1, 4*mpisize)
    comm.Bcast(data_b, root=0)
    ensemble = np.vstack([data_b] * 2)
    sims.append(name=tag, data=Observable(ensemble, 'simulated'), otype='plain')
    # simple likelihood
    rslt_simple = SimpleLikelihood(meas, covs)(sims)
    # ensemble likelihood
    rslt_ensemble = EnsembleLikelihood(meas, covs)(sims)
    # degenerate ensemble -> both evaluations must coincide exactly
    assert rslt_ensemble == rslt_simple
def test_with_cov(self):
    """
    Checks SimpleLikelihood with a covariance matrix against an
    explicit hand computation on the globally gathered arrays.
    """
    sims = Simulations()
    meas = Measurements()
    covs = Covariances()
    tag = ('test', None, 4*mpisize, None)
    # mock measurements, identical on every rank
    data_a = np.random.rand(1, 4*mpisize)
    comm.Bcast(data_a, root=0)
    meas.append(name=tag, data=Observable(data_a, 'measured'), otype='plain')
    # mock sims (rank-local slice of the ensemble)
    data_b = np.random.rand(5, 4*mpisize)
    sims.append(name=tag, data=Observable(data_b, 'simulated'), otype='plain')
    # mock covariance (rank-local block of rows)
    data_c = np.random.rand(4, 4*mpisize)
    covs.append(name=tag, cov_data=Observable(data_c, 'covariance'))
    # value produced by the likelihood object
    # (feed variable value, not parameter value)
    rslt = SimpleLikelihood(meas, covs)(sims)
    # reference value computed by hand on global arrays
    global_b = np.vstack(comm.allgather(data_b))     # global ensemble
    residual = np.mean(global_b, axis=0) - data_a
    global_cov = np.vstack(comm.allgather(data_c))   # global covariance
    sign, logdet = np.linalg.slogdet(global_cov*2.*np.pi)
    chi2 = np.vdot(residual, np.linalg.solve(global_cov, residual.T))
    baseline = -0.5*(chi2 + sign*logdet)
    assert np.allclose(rslt, baseline)
def test_without_cov(self):
    """
    HEALPix variant: simple vs. ensemble likelihood without covariance,
    using rank-dependent ensemble sizes (root holds one extra copy).
    """
    sims = Simulations()
    meas = Measurements()
    tag = ('test', None, mpisize, None)
    npix = 12*mpisize**2
    # mock measurements, broadcast so all ranks agree
    data_a = np.random.rand(1, npix)
    comm.Bcast(data_a, root=0)
    meas.append(name=tag, data=Observable(data_a, 'measured'), otype='HEALPix')
    # mock observable: one realisation repeated; rank 0 keeps 3 copies,
    # every other rank keeps 2
    data_b = np.random.rand(1, npix)
    comm.Bcast(data_b, root=0)
    copies = 3 if mpirank == 0 else 2
    ensemble = np.vstack([data_b] * copies)
    sims.append(name=tag, data=Observable(ensemble, 'simulated'), otype='HEALPix')
    # simple likelihood
    rslt_simple = SimpleLikelihood(meas)(sims)
    # ensemble likelihood
    rslt_ensemble = EnsembleLikelihood(meas)(sims)
    # degenerate ensemble -> identical results expected
    assert rslt_ensemble == rslt_simple
def test_with_trace_approximation(self):
    """
    With a diagonal data covariance and a degenerate (zero-spread)
    ensemble, the trace-approximated ensemble likelihood must match
    the simple likelihood.
    """
    sims = Simulations()
    meas = Measurements()
    covs = Covariances()
    tag = ('test', None, 4*mpisize, None)
    # mock measurements, identical on all ranks
    data_a = np.random.rand(1, 4*mpisize)
    comm.Bcast(data_a, root=0)
    meas.append(name=tag, data=Observable(data_a, 'measured'), otype='plain')
    # mock covariance; NB the trace approximation requires the data
    # covariance to be diagonal
    data_c = np.diag(np.random.rand(4))
    covs.append(name=tag, cov_data=Observable(data_c, 'covariance'))
    # mock observable: a single realisation duplicated over the ensemble
    data_b = np.random.rand(1, 4*mpisize)
    comm.Bcast(data_b, root=0)
    ensemble = np.vstack([data_b] * 2)
    sims.append(name=tag, data=Observable(ensemble, 'simulated'), otype='plain')
    # simple likelihood
    result_simple = SimpleLikelihood(meas, covs)(sims)
    # ensemble likelihood with the trace approximation enabled
    result_ensemble = EnsembleLikelihood(meas, covs,
                                         use_trace_approximation=True)(sims)
    assert result_ensemble == result_simple
def test_without_cov(self):
    """
    Checks SimpleLikelihood without a covariance matrix against a plain
    chi-square of the globally averaged ensemble (nside=2 HEALPix map).
    """
    sims = Simulations()
    meas = Measurements()
    tag = ('test', None, 2, None)
    # mock measurements, shared across ranks
    data_a = np.random.rand(1, 48)
    comm.Bcast(data_a, root=0)
    meas.append(name=tag, data=Observable(data_a, 'measured'), otype='HEALPix')
    # mock sims (rank-local ensemble members)
    data_b = np.random.rand(3, 48)
    sims.append(name=tag, data=Observable(data_b, 'simulated'), otype='HEALPix')
    # no covariance supplied; value produced by the likelihood object
    # (feed variable value, not parameter value)
    rslt = SimpleLikelihood(meas)(sims)
    # reference value computed by hand on the global ensemble
    global_b = np.vstack(comm.allgather(data_b))
    residual = np.mean(global_b, axis=0) - data_a
    baseline = -0.5*float(np.vdot(residual, residual))
    # compare
    assert np.allclose(rslt, baseline)
def test_pipeline_template():
    """
    Tests the PipelineTemplate: construction, running, posterior
    reporting/summary, directory creation, fake evidence values and
    save/load round-trip.
    """
    # Fake measurements / covariances
    fd_units = u.microgauss*u.cm**-3
    positions = np.arange(5)
    fd = np.ones_like(positions)
    table = {'meas': fd,
             'err': np.ones_like(fd)*0.1,
             'x': positions,
             'y': np.zeros_like(fd),
             'z': np.zeros_like(fd)}
    dset = img_obs.TabularDataset(table, name='test', data_col='meas',
                                  coords_type='cartesian',
                                  x_col='x', y_col='y', z_col='z',
                                  err_col='err', units=fd_units)
    measurements = img_obs.Measurements()
    covariances = img_obs.Covariances()
    measurements.append(dataset=dset)
    covariances.append(dataset=dset)
    # Likelihood
    likelihood = SimpleLikelihood(measurements, covariances)
    # Grid
    grid = img_fields.UniformGrid(box=[[0, 2*np.pi]*u.kpc,
                                       [0, 0]*u.kpc,
                                       [0, 0]*u.kpc],
                                  resolution=[30, 1, 1])
    # Field factories
    TE_factory = FakeRandomTEFactory(grid=grid)
    TE_factory.active_parameters = ['param']
    B_factory = ConstantBFactory(grid=grid)
    B_factory.active_parameters = ['Bx', 'By']
    # Simulator
    simulator = TestSimulator(measurements)
    # Sets the pipeline
    pipeline = PipelineTemplate(simulator=simulator,
                                factory_list=[TE_factory, B_factory],
                                likelihood=likelihood,
                                ensemble_size=2)
    # Tests sampling controllers
    pipeline.sampling_controllers = dict(controller_a=True)
    # Runs fake pipeline, including another sampling controller;
    # this in turn checks multiple structures of the pipeline object
    pipeline(controller_b=False)
    # Tests posterior report (checks execution only)
    pipeline.posterior_report()
    # Tests posterior summary
    assert pipeline.posterior_summary['constant_B_Bx']['median'] == 0.5*muG
    assert pipeline.posterior_summary['constant_B_By']['median'] == 0.5*muG
    # Tests (temporary) chains and run directory creation
    run_dir = pipeline.run_directory
    assert os.path.isdir(pipeline.chains_directory)
    assert os.path.isdir(run_dir)
    # Checks ("computed") log_evidence
    assert (pipeline.log_evidence, pipeline.log_evidence_err) == (42.0, 17.0)
    # Tests saving and loading
    # (the pipeline should have been saved after running)
    pipeline_copy = load_pipeline(pipeline.run_directory)
    assert (pipeline_copy.log_evidence,
            pipeline_copy.log_evidence_err) == (42.0, 17.0)
    assert pipeline_copy.posterior_summary['constant_B_By']['median'] == 0.5*muG
def test_pipeline_template():
    """
    Tests the PipelineTemplate: construction with explicit factories and
    priors, running, posterior summary, MAP estimation, directory
    creation, fake evidence values and save/load round-trip.
    """
    # Fake measurements / covariances
    fd_units = u.microgauss * u.cm**-3
    positions = np.arange(5)
    fd = np.ones_like(positions)
    table = {
        'meas': fd,
        'err': np.ones_like(fd) * 0.1,
        'x': positions,
        'y': np.zeros_like(fd),
        'z': np.zeros_like(fd)
    }
    dset = img_obs.TabularDataset(table,
                                  name='test',
                                  data_col='meas',
                                  coords_type='cartesian',
                                  x_col='x',
                                  y_col='y',
                                  z_col='z',
                                  err_col='err',
                                  units=fd_units)
    measurements = img_obs.Measurements()
    covariances = img_obs.Covariances()
    measurements.append(dataset=dset)
    covariances.append(dataset=dset)
    # Likelihood
    likelihood = SimpleLikelihood(measurements, covariances)
    # Grid
    grid = img_fields.UniformGrid(box=[[0, 2 * np.pi] * u.kpc,
                                       [0, 0] * u.kpc,
                                       [0, 0] * u.kpc],
                                  resolution=[30, 1, 1])
    # Field factories
    TE_factory = img_fields.FieldFactory(
        field_class=FakeRandomTE,
        grid=grid,
        active_parameters=['param'],
        priors={'param': img_priors.FlatPrior(xmin=0, xmax=10.)})
    B_factory = img_fields.FieldFactory(
        field_class=img_fields.ConstantMagneticField,
        grid=grid,
        active_parameters=['Bx', 'By'],
        default_parameters={'Bz': 3. * muG},
        priors={
            'Bx': img_priors.GaussianPrior(mu=1.5 * muG,
                                           sigma=0.5 * muG,
                                           xmin=0 * muG,
                                           xmax=5.0 * muG),
            'By': img_priors.GaussianPrior(mu=1.5 * muG, sigma=0.5 * muG)
        })
    # Simulator
    simulator = TestSimulator(measurements)
    # Sets the pipeline
    run_directory = os.path.join(rc['temp_dir'], 'test_templates')
    pipeline = PipelineTemplate(run_directory=run_directory,
                                simulator=simulator,
                                factory_list=[TE_factory, B_factory],
                                likelihood=likelihood,
                                ensemble_size=2)
    # Tests sampling controllers
    pipeline.sampling_controllers = dict(controller_a=True)
    # Runs fake pipeline, including another sampling controller;
    # this in turn checks multiple structures of the pipeline object
    pipeline(controller_b=False)
    # Tests posterior report (checks execution only)
    pipeline.posterior_report()
    # Tests posterior summary
    assert pipeline.posterior_summary['constant_B_Bx']['median'] == 0.5 * muG
    assert pipeline.posterior_summary['constant_B_By']['median'] == 0.5 * muG
    # Test MAP method
    assert np.allclose(
        pipeline.get_MAP(include_units=False, initial_guess=[9, 1, 1]),
        [9.99994039, 1.50098424, 1.00396825])
    # Tests MAP_model property
    assert np.isclose(pipeline.MAP_model[0].parameters['param'], 9.99994039)
    # Tests chains and run directory creation
    run_dir = pipeline.run_directory
    assert os.path.isdir(pipeline.chains_directory)
    assert os.path.isdir(run_dir)
    # Checks ("computed") log_evidence
    assert (pipeline.log_evidence, pipeline.log_evidence_err) == (42.0, 17.0)
    # Tests saving and loading
    # (the pipeline should have been saved after running)
    pipeline_copy = load_pipeline(pipeline.run_directory)
    assert (pipeline_copy.log_evidence,
            pipeline_copy.log_evidence_err) == (42.0, 17.0)
    assert pipeline_copy.posterior_summary['constant_B_By']['median'] == 0.5 * muG