Example #1
def test_oas_cov(self):
    # mock observable ensemble with identical realisations on every rank
    # (assumed context: numpy as np, an MPI communicator `comm`, imagine's oas_cov)
    arr = np.random.rand(1, 32)
    comm.Bcast(arr, root=0)
    local_cov = oas_cov(arr)
    full_cov = np.vstack(comm.allgather(local_cov))
    # an ensemble of identical realisations must have zero covariance
    null_cov = np.zeros_like(full_cov)
    for i in range(full_cov.shape[0]):
        for j in range(full_cov.shape[1]):
            self.assertAlmostEqual(null_cov[i][j], full_cov[i][j])
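
For reference, oas_cov implements the Oracle Approximating Shrinkage (OAS) covariance estimator over an MPI-distributed ensemble. A minimal single-process sketch of the underlying OAS formula (Chen et al. 2010) is given below; the helper name single_process_oas is hypothetical, and imagine's actual routine additionally distributes both the ensemble and the resulting matrix across ranks.

import numpy as np

def single_process_oas(ensemble):
    """OAS shrinkage covariance of an (n_samples, n_dims) ensemble (illustrative sketch)."""
    n, p = ensemble.shape
    centred = ensemble - np.mean(ensemble, axis=0)
    s = centred.T @ centred / n                  # sample covariance
    tr_s = np.trace(s)
    tr_s2 = np.trace(s @ s)
    # OAS shrinkage weight rho (Chen et al. 2010)
    num = (1.0 - 2.0 / p) * tr_s2 + tr_s ** 2
    den = (n + 1.0 - 2.0 / p) * (tr_s2 - tr_s ** 2 / p)
    rho = 1.0 if den == 0 else min(1.0, num / den)
    target = np.eye(p) * tr_s / p                # shrinkage target: scaled identity
    return (1.0 - rho) * s + rho * target

With the single-realisation ensemble used in the test above, the sample covariance vanishes, the shrinkage weight saturates at 1, and the estimate collapses to the zero matrix, which is exactly what the test asserts.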
Example #2
def testfield():
    if mpisize > 1:
        raise RuntimeError('MPI unsupported in Dynesty')
    """

    :return:

    log.basicConfig(filename='imagine.log', level=log.INFO)
    """
    """
    # step 0, set 'a' and 'b', 'mea_std'
    
    TestField in LiSimulator is modeled as
        field = gaussian_random(mean=a,std=b)_x * cos(x)
        where x in (0,2pi)
    
    for generating mock data we need
    true values of a and b: true_a, true_b, mea_seed
    measurement uncertainty: mea_std
    measurement points, positioned in (0,2pi) evenly, due to TestField modelling
    """
    true_a = 3.
    true_b = 6.
    mea_std = 0.1  # std of gaussian measurement error
    mea_seed = 233
    mea_points = 10  # data points in measurements
    truths = [true_a, true_b]  # will be used in visualizing posterior
    """
    # step 1, prepare mock data
    """
    """
    # 1.1, generate measurements
    mea_field = signal_field + noise_field
    """
    x = np.linspace(0, 2. * np.pi, mea_points)
    np.random.seed(mea_seed)  # seed for signal field
    signal_field = np.multiply(
        np.cos(x), np.random.normal(loc=true_a, scale=true_b, size=mea_points))
    mea_field = np.vstack([
        signal_field + np.random.normal(loc=0., scale=mea_std, size=mea_points)
    ])
    """
    # 1.2, generate covariances
    what's the difference between pre-define dan re-estimated?
    """
    # re-estimate according to measurement error
    repeat = 100  # times of repeated measurements
    mea_repeat = np.zeros((repeat, mea_points))
    for i in range(repeat):
        mea_repeat[i, :] = signal_field + np.random.normal(
            loc=0., scale=mea_std, size=mea_points)
    mea_cov = oas_cov(mea_repeat)

    print('re-estimated: \n', mea_cov)

    # pre-defined according to measurement error
    mea_cov = (mea_std**2) * np.eye(mea_points)

    print('pre-defined: \n', mea_cov)
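
    # note: the pre-defined covariance overwrites the re-estimated one just above,
    # so the diagonal (mea_std**2) * identity matrix is what enters the likelihood;
    # the OAS re-estimate is printed only for comparison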
    """
    # 1.3 assemble in imagine convention
    """

    mock_data = Measurements()  # create empty Measurements object
    mock_cov = Covariances()  # create empty Covariances object
    # register the measurement and its covariance under matching keys
    mock_data.append(('test', 'nan', str(mea_points), 'nan'), mea_field, True)
    mock_cov.append(('test', 'nan', str(mea_points), 'nan'), mea_cov, True)
    """
    # 1.4, visualize mock data
    """
    matplotlib.pyplot.plot(
        x,
        mock_data[('test', 'nan', str(mea_points), 'nan')].to_global_data()[0])
    matplotlib.pyplot.savefig('testfield_mock.pdf')
    """
    # step 2, prepare pipeline and execute analysis
    """
    """
    # 2.1, ensemble likelihood
    """
    likelihood = EnsembleLikelihood(
        mock_data, mock_cov)  # initialize likelihood with measured info
    #likelihood = SimpleLikelihood(mock_data, mock_cov)
    #likelihood.active_parameters = ()
    """
    # 2.2, field factory list
    """
    factory = TestFieldFactory(
        active_parameters=('a', 'b'))  # factory with two active parameters
    factory.parameter_ranges = {
        'a': (0, 10),
        'b': (0, 10)
    }  # adjust parameter range for Bayesian analysis
    factory_list = [factory]  # likelihood requires a list/tuple of factories
    """
    # 2.3, flat prior
    """
    prior = FlatPrior()
    """
    # 2.4, simulator 
    """
    simer = LiSimulator(mock_data)
    """
    # 2.5, pipeline
    """
    ensemble_size = 10
    pipe = DynestyPipeline(simer, factory_list, likelihood, prior,
                           ensemble_size)
    pipe.random_type = 'controllable'  # 'fixed' won't work for Dynesty
    pipe.seed_tracer = int(23)
    pipe.sampling_controllers = {'nlive': 400}
    results = pipe()  # run with dynesty
    """
    # step 3, visualize (with corner package)
    """
    samples = results['samples']
    for i in range(len(
            pipe.active_parameters)):  # map unit-cube samples back to physical parameters
        low, high = pipe.active_ranges[pipe.active_parameters[i]]
        for j in range(samples.shape[0]):
            samples[j, i] = unity_mapper(samples[j, i], low, high)
    # corner plot
    corner.corner(samples[:, :len(pipe.active_parameters)],
                  range=[0.99] * len(pipe.active_parameters),
                  quantiles=[0.02, 0.5, 0.98],
                  labels=pipe.active_parameters,
                  show_titles=True,
                  title_kwargs={"fontsize": 15},
                  color='steelblue',
                  truths=truths,
                  truth_color='firebrick',
                  plot_contours=True,
                  hist_kwargs={'linewidth': 2},
                  label_kwargs={'fontsize': 20})
    matplotlib.pyplot.savefig('testfield_posterior.pdf')
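
The loop in step 3 relies on unity_mapper to map the sampler's unit-cube variables back onto the physical parameter ranges. A minimal sketch of such a linear mapping is shown below; unity_mapper_sketch is an illustrative stand-in, not imagine's actual implementation.

def unity_mapper_sketch(x, low, high):
    """Map x from the unit interval [0, 1] linearly onto [low, high] (illustrative sketch)."""
    return low + x * (high - low)

# e.g. a unit-cube sample of 0.3 for parameter 'a' with range (0, 10) maps to 3.0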
Example #3
def mock_errprop(_nside, _freq):
    """
    return masked mock synchrotron Q, U
    error propagated from theoretical uncertainties
    """
    # hammurabi parameter base file
    xmlpath = './params.xml'
    # active parameters
    true_b0 = 3.0
    true_psi0 = 27.0
    true_psi1 = 0.9
    true_chi0 = 25.
    true_alpha = 3.0
    true_r0 = 5.0
    true_z0 = 1.0
    true_rms = 6.0
    true_rho = 0.8
    true_a0 = 1.7
    #
    _npix = 12*_nside**2
    #
    x = np.zeros((1, _npix))  # only for triggering simulator
    trigger = Measurements()
    trigger.append(('sync', str(_freq), str(_nside), 'Q'), x)  # Q map
    trigger.append(('sync', str(_freq), str(_nside), 'U'), x)  # U map
    # initialize simulator
    mocksize = 20  # ensemble of mock data
    error = 0.1  # theoretical relative uncertainty for each (active) parameter
    mocker = Hammurabi(measurements=trigger, xml_path=xmlpath)
    # prepare theoretical uncertainty
    b0_var = np.random.normal(true_b0, error*true_b0, mocksize)
    psi0_var = np.random.normal(true_psi0, error*true_psi0, mocksize)
    psi1_var = np.random.normal(true_psi1, error*true_psi1, mocksize)
    chi0_var = np.random.normal(true_chi0, error*true_chi0, mocksize)
    alpha_var = np.random.normal(true_alpha, error*true_alpha, mocksize)
    r0_var = np.random.normal(true_r0, error*true_r0, mocksize)
    z0_var = np.random.normal(true_z0, error*true_z0, mocksize)
    rms_var = np.random.normal(true_rms, error*true_rms, mocksize)
    rho_var = np.random.normal(true_rho, error*true_rho, mocksize)
    a0_var = np.random.normal(true_a0, error*true_a0, mocksize)
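    # each active parameter is scattered with a 10% relative Gaussian error,
    # drawing one value per ensemble member to propagate theoretical uncertainty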
    mock_raw_q = np.zeros((mocksize, _npix))
    mock_raw_u = np.zeros((mocksize, _npix))
    # start simulation
    for i in range(mocksize):  # get one realization each time
        # BregLSA field
        paramlist = {'b0': b0_var[i], 'psi0': psi0_var[i], 'psi1': psi1_var[i], 'chi0': chi0_var[i]}
        breg_lsa = BregLSA(paramlist, 1)
        # CREAna field
        paramlist = {'alpha': alpha_var[i], 'beta': 0.0, 'theta': 0.0,
                     'r0': r0_var[i], 'z0': z0_var[i],
                     'E0': 20.6, 'j0': 0.0217}
        cre_ana = CREAna(paramlist, 1)
        # TEregYMW16 field
        paramlist = dict()
        fereg_ymw16 = TEregYMW16(paramlist, 1)
        # BrndES field
        paramlist = {'rms': rms_var[i], 'k0': 10.0, 'k1': 0.1, 'a1': 0.0, 'a0': a0_var[i], 'rho': rho_var[i],
                     'r0': 8.0, 'z0': 1.0}
        brnd_es = BrndES(paramlist, 1)
        # collect mock data and covariance
        outputs = mocker([breg_lsa, cre_ana, fereg_ymw16, brnd_es])
        mock_raw_q[i, :] = outputs[('sync', str(_freq), str(_nside), 'Q')].data
        mock_raw_u[i, :] = outputs[('sync', str(_freq), str(_nside), 'U')].data
    # collect mean and cov from simulated results
    sim_data = Simulations()
    mock_data = Measurements()
    mock_cov = Covariances()
    mock_mask = Masks()
    
    sim_data.append(('sync', str(_freq), str(_nside), 'Q'), mock_raw_q)
    sim_data.append(('sync', str(_freq), str(_nside), 'U'), mock_raw_u)
    
    mask_map = mask_map_prod(_nside, 0, 90, 50)  # not parameterizing this
    mock_mask.append(('sync', str(_freq), str(_nside), 'Q'), np.vstack([mask_map]))
    mock_mask.append(('sync', str(_freq), str(_nside), 'U'), np.vstack([mask_map]))
    sim_data.apply_mask(mock_mask)
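    # rank 0's first masked realisation is broadcast and used as the mock
    # measurement; the full masked ensemble feeds the OAS covariance estimate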
    for key in sim_data.keys():
        global_mock = np.vstack([(sim_data[key].data)[0]])
        comm.Bcast(global_mock, root=0)
        mock_data.append(key, global_mock, True)
        mock_cov.append(key, oas_cov(sim_data[key].data), True)
    return mock_data, mock_cov
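
mask_map_prod is not defined in this snippet; the call mask_map_prod(_nside, 0, 90, 50) suggests a HEALPix mask built from sky cuts. A hypothetical sketch of such a mask builder using healpy is shown below; the argument meanings are an assumption made for illustration, not imagine's actual interface.

import numpy as np
import healpy as hp

def mask_map_sketch(nside, lat_min, lat_max, lon_cut):
    """Hypothetical binary HEALPix mask: keep pixels whose absolute galactic latitude
    lies in [lat_min, lat_max] and whose longitude is below lon_cut (all in degrees)."""
    npix = hp.nside2npix(nside)
    theta, phi = hp.pix2ang(nside, np.arange(npix))
    lat = 90.0 - np.degrees(theta)   # colatitude (rad) -> latitude (deg)
    lon = np.degrees(phi)
    keep = (np.abs(lat) >= lat_min) & (np.abs(lat) <= lat_max) & (lon <= lon_cut)
    return keep.astype(float)        # 1 = keep, 0 = masked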
Example #4
def mock_errprop(_nside, _freq):
    """
    return masked mock synchrotron Q, U
    error propagated from theoretical uncertainties
    """
    # hammurabi parameter base file
    xmlpath = './params_masked_regular.xml'
    # active parameters
    true_b0 = 6.0
    true_psi0 = 27.0
    true_psi1 = 0.9
    true_chi0 = 25.
    true_alpha = 3.0
    true_r0 = 5.0
    true_z0 = 1.0
    #
    _npix = 12 * _nside**2
    #
    x = np.zeros((1, _npix))  # only for triggering simulator
    trigger = Measurements()
    trigger.append(('sync', str(_freq), str(_nside), 'Q'), x)  # Q map
    trigger.append(('sync', str(_freq), str(_nside), 'U'), x)  # U map
    # initialize simulator
    mocksize = 20  # ensemble of mock data (per node)
    error = 0.1  # theoretical relative uncertainty for each (active) parameter
    mocker = Hammurabi(measurements=trigger, xml_path=xmlpath)
    # prepare theoretical uncertainty
    b0_var = np.random.normal(true_b0, error * true_b0, mocksize)
    psi0_var = np.random.normal(true_psi0, error * true_psi0, mocksize)
    psi1_var = np.random.normal(true_psi1, error * true_psi1, mocksize)
    chi0_var = np.random.normal(true_chi0, error * true_chi0, mocksize)
    alpha_var = np.random.normal(true_alpha, error * true_alpha, mocksize)
    r0_var = np.random.normal(true_r0, error * true_r0, mocksize)
    z0_var = np.random.normal(true_z0, error * true_z0, mocksize)
    mock_raw_q = np.zeros((mocksize, _npix))
    mock_raw_u = np.zeros((mocksize, _npix))
    # start simulation
    np.random.seed(mpirank * 10)
    for i in range(mocksize):  # get one realization each time
        # BregWMAP field
        paramlist = {
            'b0': b0_var[i],
            'psi0': psi0_var[i],
            'psi1': psi1_var[i],
            'chi0': chi0_var[i]
        }
        breg_wmap = BregWMAP(paramlist, 1)
        # CREAna field
        paramlist = {
            'alpha': alpha_var[i],
            'beta': 0.0,
            'theta': 0.0,
            'r0': r0_var[i],
            'z0': z0_var[i],
            'E0': 20.6,
            'j0': 0.0217
        }
        cre_ana = CREAna(paramlist, 1)
        # FEregYMW16 field
        paramlist = dict()
        fereg_ymw16 = FEregYMW16(paramlist, 1)
        # collect mock data and covariance
        outputs = mocker([breg_wmap, cre_ana, fereg_ymw16])
        mock_raw_q[i, :] = outputs[('sync', str(_freq), str(_nside),
                                    'Q')].local_data
        mock_raw_u[i, :] = outputs[('sync', str(_freq), str(_nside),
                                    'U')].local_data
    # collect mean and cov from simulated results
    sim_data = Simulations()
    mock_data = Measurements()
    mock_cov = Covariances()

    sim_data.append(('sync', str(_freq), str(_nside), 'Q'), mock_raw_q)
    sim_data.append(('sync', str(_freq), str(_nside), 'U'), mock_raw_u)
    mock_mask = mask_map(_nside, _freq)
    sim_data.apply_mask(mock_mask)
    for key in sim_data.keys():
        mock_data.append(
            key,
            np.vstack([(sim_data[key].to_global_data())[np.random.randint(
                0, mocksize)]]), True)
        mock_cov.append(key, oas_cov(sim_data[key].to_global_data()), True)
    return mock_data, mock_cov
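
The returned mock_data and mock_cov feed an ensemble likelihood in the same way as in Example #2. A minimal usage sketch, with placeholder nside and frequency values:

mock_data, mock_cov = mock_errprop(_nside=2, _freq=23)
likelihood = EnsembleLikelihood(mock_data, mock_cov)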